diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..89b8ba826 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "vendor/github.com/knative/pkg"] + path = vendor/github.com/knative/pkg + url = https://github.com/knative/pkg.git diff --git a/.travis.yml b/.travis.yml index a7b9cc253..69416cda2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,19 +12,19 @@ go: go_import_path: kubesphere.io/kubesphere before_install: - - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + - go get -u github.com/golang/dep/cmd/dep before_script: - dep ensure -v - docker --version + - bash hack/install_kubebuilder.sh script: - - make test - - bash hack/docker_build.sh + - make all && make test && bash hack/docker_build.sh deploy: skip_cleanup: true provider: script script: bash hack/docker_push.sh on: - branch: master \ No newline at end of file + branch: master diff --git a/Gopkg.lock b/Gopkg.lock index 0f72a64d5..a118c53c9 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,117 +2,98 @@ [[projects]] - digest = "1:4d6f036ea3fe636bcb2e89850bcdc62a771354e157cd51b8b22a2de8562bf663" name = "cloud.google.com/go" packages = ["compute/metadata"] - pruneopts = "UT" revision = "f52f9bc132541d2aa914f42100c36d10b1ef7e0c" version = "v0.37.0" [[projects]] - digest = "1:f9ae348e1f793dcf9ed930ed47136a67343dbd6809c5c91391322267f4476892" name = "github.com/Microsoft/go-winio" packages = ["."] - pruneopts = "UT" revision = "1a8911d1ed007260465c3bfbbc785ac6915a0bb8" version = "v0.4.12" [[projects]] - digest = "1:a2682518d905d662d984ef9959984ef87cecb777d379bfa9d9fe40e78069b3e4" name = "github.com/PuerkitoBio/purell" packages = ["."] - pruneopts = "UT" revision = "44968752391892e1b0d0b821ee79e9a85fa13049" version = "v1.1.1" [[projects]] branch = "master" - digest = "1:c739832d67eb1e9cc478a19cc1a1ccd78df0397bf8a32978b759152e205f644b" name = "github.com/PuerkitoBio/urlesc" packages = ["."] - pruneopts = "UT" revision = "de5bf2ad457846296e2031421a34e2568e304e35" [[projects]] - digest = "1:e4c72127d910a96daf869a44f3dd563b86dbe6931a172863a0e99c5ff04b59e4" name = "github.com/Sirupsen/logrus" packages = ["."] - pruneopts = "UT" revision = "dae0fa8d5b0c810a8ab733fbd5510c7cae84eca4" version = "v1.4.0" [[projects]] branch = "master" - digest = "1:e05c7b72aeba7570b1d2f9c6dc9f0373d224b16b70fa886c504de867bffe1c2e" name = "github.com/aead/chacha20" packages = [ ".", - "chacha", + "chacha" ] - pruneopts = "UT" revision = "8b13a72661dae6e9e5dea04f344f0dc95ea29547" +[[projects]] + name = "github.com/appscode/jsonpatch" + packages = ["."] + revision = "7c0e3b262f30165a8ec3d0b4c6059fd92703bfb2" + version = "1.0.0" + [[projects]] branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" name = "github.com/beorn7/perks" packages = ["quantile"] - pruneopts = "UT" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] - digest = "1:1f96c755246f1b688f1b2f0d2ba6cff5a94ceb35a7c5d06f7489e5e348ce356d" name = "github.com/bifurcation/mint" packages = [ ".", - "syntax", + "syntax" ] - pruneopts = "UT" - revision = "93c51c6ce11597a26e246fc33a301d62d3439cd2" + revision = "824af65410658916142a7600349144e1289f2110" [[projects]] - digest = "1:166438587ed45ac211dab8a3ecebf4fa0c186d0db63430fb9127bbc2e5fcdc67" name = "github.com/cenkalti/backoff" packages = ["."] - pruneopts = "UT" revision = "1e4cf3da559842a91afcb6ea6141451e6c30c618" version = "v2.1.1" [[projects]] - digest = "1:f438d91be142877c3ad83157992c91de787ddfbddcc2a7da1ef6ef61606cadc4" name = 
"github.com/cheekybits/genny" packages = ["generic"] - pruneopts = "UT" - revision = "9127e812e1e9e501ce899a18121d316ecb52e4ba" + revision = "d2cf3cdd35ce0d789056c4bc02a4d6349c947caf" + version = "v1.0.0" [[projects]] - digest = "1:6b21090f60571b20b3ddc2c8e48547dffcf409498ed6002c2cada023725ed377" name = "github.com/davecgh/go-spew" packages = ["spew"] - pruneopts = "UT" - revision = "782f4967f2dc4564575ca782fe2d04090b5faca8" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" [[projects]] - digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" name = "github.com/dgrijalva/jwt-go" packages = ["."] - pruneopts = "UT" revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" version = "v3.2.0" [[projects]] branch = "master" - digest = "1:4c7d169280debf9f36b84a0f682094889cccc5dc0db8657f9cffc93b21975a57" name = "github.com/docker/distribution" packages = [ "digestset", - "reference", + "reference" ] - pruneopts = "UT" revision = "6d62eb1d4a3515399431b713fde3ce5a9b40e8d5" [[projects]] - digest = "1:ec821dda59d7dd340498d74f798aa218b2c782bba54a690e866dc4f520d900d5" name = "github.com/docker/docker" packages = [ "api", @@ -134,140 +115,109 @@ "pkg/ioutils", "pkg/longpath", "pkg/system", - "pkg/tlsconfig", + "pkg/tlsconfig" ] - pruneopts = "UT" revision = "90d35abf7b3535c1c319c872900fbd76374e521c" version = "v17.05.0-ce-rc3" [[projects]] branch = "master" - digest = "1:02f8f4889e53a6adbc7bf3f2b9007ce8aecb0b79686fc9f99265246c88063f10" name = "github.com/docker/go-connections" packages = [ "nat", "sockets", - "tlsconfig", + "tlsconfig" ] - pruneopts = "UT" revision = "97c2040d34dfae1d1b1275fa3a78dbdd2f41cf7e" [[projects]] - digest = "1:aaf8d8f8ffb92e0c4985a8796655071cb9a1870c585ad50d968f78931b871a98" name = "github.com/docker/go-units" packages = ["."] - pruneopts = "UT" - revision = "8a7beacffa3009a9ac66bad506b18ffdd110cf97" + revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" + version = "v0.3.3" [[projects]] branch = "master" - digest = "1:4841e14252a2cecf11840bd05230412ad469709bbacfc12467e2ce5ad07f339b" name = "github.com/docker/libtrust" packages = ["."] - pruneopts = "UT" revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20" [[projects]] - digest = "1:6f9339c912bbdda81302633ad7e99a28dfa5a639c864061f1929510a9a64aa74" name = "github.com/dustin/go-humanize" packages = ["."] - pruneopts = "UT" revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e" version = "v1.0.0" [[projects]] - digest = "1:8ee7b41ace3ba875c17e38ba7780e7cf0d29882338637861e9f13f04f60ecc5c" name = "github.com/emicklei/go-restful" packages = [ ".", - "log", + "log" ] - pruneopts = "UT" revision = "85d198d05a92d31823b852b4a5928114912e8949" version = "v2.9.0" [[projects]] - digest = "1:8ba45daa43acfd9364068b118a6884cb82bc1ef0569dc05260f3b464907e07d9" name = "github.com/emicklei/go-restful-openapi" packages = ["."] - pruneopts = "UT" revision = "b7062368c258c9e8f8cbe9dd2e6aebfa1b747be6" version = "v1.0.0" [[projects]] branch = "master" - digest = "1:50a46ab1d5edbbdd55125b4d37f1bf503d0807c26461f9ad7b358d6006641d09" name = "github.com/flynn/go-shlex" packages = ["."] - pruneopts = "UT" revision = "3f9db97f856818214da2e1057f8ad84803971cff" [[projects]] - digest = "1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda" name = "github.com/ghodss/yaml" packages = ["."] - pruneopts = "UT" revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" version = "v1.0.0" [[projects]] - digest = "1:2a792dde3ae5dc95000d4dee312523504ea875b097a2b119a2f55d1d1d32ed61" name = "github.com/go-ldap/ldap" 
packages = ["."] - pruneopts = "UT" revision = "729c20c2694d870bcd631f0dadaecd088bd7ccbc" version = "v3.0.2" [[projects]] - digest = "1:edd2fa4578eb086265db78a9201d15e76b298dfd0d5c379da83e9c61712cf6df" name = "github.com/go-logr/logr" packages = ["."] - pruneopts = "UT" revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" version = "v0.1.0" [[projects]] - digest = "1:d81dfed1aa731d8e4a45d87154ec15ef18da2aa80fa9a2f95bec38577a244a99" name = "github.com/go-logr/zapr" packages = ["."] - pruneopts = "UT" revision = "03f06a783fbb7dfaf3f629c7825480e43a7105e6" version = "v0.1.1" [[projects]] - digest = "1:953a2628e4c5c72856b53f5470ed5e071c55eccf943d798d42908102af2a610f" name = "github.com/go-openapi/jsonpointer" packages = ["."] - pruneopts = "UT" revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004" version = "v0.18.0" [[projects]] - digest = "1:81210e0af657a0fb3638932ec68e645236bceefa4c839823db0c4d918f080895" name = "github.com/go-openapi/jsonreference" packages = ["."] - pruneopts = "UT" revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3" version = "v0.18.0" [[projects]] - digest = "1:08656ef9c5a45ddccb7f206ca2d67e12e9fcda4122a83dc0544b5c967267cefa" name = "github.com/go-openapi/spec" packages = ["."] - pruneopts = "UT" revision = "5b6cdde3200976e3ecceb2868706ee39b6aff3e4" version = "v0.18.0" [[projects]] - digest = "1:0005186c6608dd542239ac8e4f4f1e2e7c24d493e999113c46b93332f0362fc0" name = "github.com/go-openapi/swag" packages = ["."] - pruneopts = "UT" revision = "1d29f06aebd59ccdf11ae04aa0334ded96e2d909" version = "v0.18.0" [[projects]] - digest = "1:33082c63746b464db3d1c2c07a1396d860484d97fe857ef9e8668a9b406db09f" name = "github.com/go-redis/redis" packages = [ ".", @@ -276,204 +226,248 @@ "internal/hashtag", "internal/pool", "internal/proto", - "internal/util", + "internal/util" ] - pruneopts = "UT" revision = "d22fde8721cc915a55aeb6b00944a76a92bfeb6e" version = "v6.15.2" [[projects]] - digest = "1:ec6f9bf5e274c833c911923c9193867f3f18788c461f76f05f62bb1510e0ae65" name = "github.com/go-sql-driver/mysql" packages = ["."] - pruneopts = "UT" revision = "72cd26f257d44c1114970e19afddcd812016007e" version = "v1.4.1" [[projects]] - digest = "1:b7a8552c62868d867795b63eaf4f45d3e92d36db82b428e680b9c95a8c33e5b1" + name = "github.com/gobuffalo/envy" + packages = ["."] + revision = "fa0dfdc10b5366ce365b7d9d1755a03e4e797bc5" + version = "v1.6.15" + +[[projects]] name = "github.com/gogo/protobuf" packages = [ "proto", - "sortkeys", + "sortkeys" ] - pruneopts = "UT" - revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02" - version = "v0.5" + revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" + version = "v1.2.1" [[projects]] branch = "master" - digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467" name = "github.com/golang/glog" packages = ["."] - pruneopts = "UT" revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" [[projects]] - digest = "1:7672c206322f45b33fac1ae2cb899263533ce0adcc6481d207725560208ec84e" + branch = "master" name = "github.com/golang/groupcache" packages = ["lru"] - pruneopts = "UT" - revision = "02826c3e79038b59d737d3b1c0a1d937f71a4433" + revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b" [[projects]] - digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260" name = "github.com/golang/protobuf" packages = [ "proto", "ptypes", "ptypes/any", "ptypes/duration", - "ptypes/timestamp", + "ptypes/timestamp" ] - pruneopts = "UT" - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" + revision = 
"b5d812f8a3706043e23a9cd5babf2e5423744d30" + version = "v1.3.1" [[projects]] branch = "master" - digest = "1:0bfbe13936953a98ae3cfe8ed6670d396ad81edf069a806d2f6515d7bb6950df" name = "github.com/google/btree" packages = ["."] - pruneopts = "UT" revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" [[projects]] - digest = "1:41bfd4219241b7f7d6e6fdb13fc712576f1337e68e6b895136283b76928fdd66" + branch = "master" name = "github.com/google/gofuzz" packages = ["."] - pruneopts = "UT" - revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c" + revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" [[projects]] - digest = "1:582b704bebaa06b48c29b0cec224a6058a09c86883aaddabde889cd1a5f73e1b" name = "github.com/google/uuid" packages = ["."] - pruneopts = "UT" revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" version = "v1.1.1" [[projects]] - digest = "1:75eb87381d25cc75212f52358df9c3a2719584eaa9685cd510ce28699122f39d" name = "github.com/googleapis/gnostic" packages = [ "OpenAPIv2", "compiler", - "extensions", + "extensions" ] - pruneopts = "UT" - revision = "0c5108395e2debce0d731cf0287ddf7242066aba" + revision = "7c663266750e7d82587642f65e60bc4083f1f84e" + version = "v0.2.0" + +[[projects]] + name = "github.com/gorilla/mux" + packages = ["."] + revision = "a7962380ca08b5a188038c69871b8d3fbdf31e89" + version = "v1.7.0" [[projects]] - digest = "1:7b5c6e2eeaa9ae5907c391a91c132abfd5c9e8a784a341b5625e750c67e6825d" name = "github.com/gorilla/websocket" packages = ["."] - pruneopts = "UT" revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" version = "v1.4.0" [[projects]] - digest = "1:878f0defa9b853f9acfaf4a162ba450a89d0050eff084f9fe7f5bd15948f172a" + branch = "master" name = "github.com/gregjones/httpcache" packages = [ ".", - "diskcache", + "diskcache" ] - pruneopts = "UT" - revision = "787624de3eb7bd915c329cba748687a3b22666a6" + revision = "3befbb6ad0cc97d4c25d851e9528915809e1a22f" [[projects]] - digest = "1:0a48cb9c2757781932550a45ad11dd497722d4018a28077de65674aec7eee90e" name = "github.com/hashicorp/go-syslog" packages = ["."] - pruneopts = "UT" revision = "8d1874e3e8d1862b74e0536851e218c4571066a5" version = "v1.0.0" [[projects]] - digest = "1:3f90d23757c18b1e07bf11494dbe737ee2c44d881c0f41e681611abdadad62fa" + name = "github.com/hashicorp/go-version" + packages = ["."] + revision = "d40cf49b3a77bba84a7afdbd7f1dc295d114efb1" + version = "v1.1.0" + +[[projects]] name = "github.com/hashicorp/golang-lru" packages = [ ".", - "simplelru", + "simplelru" ] - pruneopts = "UT" - revision = "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" + revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c" + version = "v0.5.1" + +[[projects]] + name = "github.com/hpcloud/tail" + packages = [ + ".", + "ratelimiter", + "util", + "watch", + "winfile" + ] + revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5" + version = "v1.0.0" [[projects]] - digest = "1:3e260afa138eab6492b531a3b3d10ab4cb70512d423faa78b8949dec76e66a21" name = "github.com/imdario/mergo" packages = ["."] - pruneopts = "UT" - revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58" - version = "v0.3.5" + revision = "7c29201646fa3de8506f701213473dd407f19646" + version = "v0.3.7" [[projects]] - digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" name = "github.com/inconshreveable/mousetrap" packages = ["."] - pruneopts = "UT" revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" version = "v1.0" [[projects]] branch = "master" - digest = "1:6d62fb66dd0a578569f2810928f3ff00baae524f94b8c3d859f1ac714bf4340d" name = 
"github.com/jimstudt/http-authentication" packages = ["basic"] - pruneopts = "UT" revision = "3eca13d6893afd7ecabe15f4445f5d2872a1b012" [[projects]] - digest = "1:d4e6e8584d0a94ce567d237e19192dae44d57d2767ac7e1d7fbf5626d176381a" name = "github.com/jinzhu/gorm" packages = ["."] - pruneopts = "UT" revision = "472c70caa40267cb89fd8facb07fe6454b578626" version = "v1.9.2" [[projects]] branch = "master" - digest = "1:fd97437fbb6b7dce04132cf06775bd258cce305c44add58eb55ca86c6c325160" name = "github.com/jinzhu/inflection" packages = ["."] - pruneopts = "UT" revision = "04140366298a54a039076d798123ffa108fff46c" [[projects]] - branch = "master" - digest = "1:f5a2051c55d05548d2d4fd23d244027b59fbd943217df8aa3b5e170ac2fd6e1b" - name = "github.com/json-iterator/go" + name = "github.com/joho/godotenv" packages = ["."] - pruneopts = "UT" - revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29" + revision = "23d116af351c84513e1946b527c88823e476be13" + version = "v1.3.0" + +[[projects]] + name = "github.com/json-iterator/go" + packages = ["."] + revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29" + version = "v1.1.6" + +[[projects]] + name = "github.com/kiali/kiali" + packages = [ + "business", + "business/checkers", + "business/checkers/destinationrules", + "business/checkers/gateways", + "business/checkers/virtual_services", + "config", + "config/security", + "graph", + "graph/appender", + "graph/cytoscape", + "graph/options", + "handlers", + "kubernetes", + "kubernetes/kubetest", + "log", + "models", + "prometheus", + "prometheus/internalmetrics", + "status", + "util", + "util/intutil" + ] + revision = "4e0f3eff11a4ab3a743328cac78c9fe9be36498c" + version = "v0.15.0" [[projects]] - digest = "1:2d643962fac133904694fffa959bc3c5dcfdcee38c6f5ffdd99a3c93eb9c835c" name = "github.com/klauspost/cpuid" packages = ["."] - pruneopts = "UT" revision = "e7e905edc00ea8827e58662220139109efea09db" version = "v1.2.0" [[projects]] - digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de" + branch = "master" + name = "github.com/knative/pkg" + packages = [ + "apis/istio", + "apis/istio/common/v1alpha1", + "apis/istio/v1alpha3" + ] + revision = "cd278f2d3394c865fda66bca12459e879e0279b8" + +[[projects]] name = "github.com/konsorten/go-windows-terminal-sequences" packages = ["."] - pruneopts = "UT" revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" version = "v1.0.2" [[projects]] branch = "master" - digest = "1:6fe6fc7707d8c37fb305362a324462d898bc1c01eaa2bfe15383ec21f6540c71" + name = "github.com/kubernetes-sigs/application" + packages = [ + "pkg/component", + "pkg/customresource", + "pkg/finalizer", + "pkg/resource" + ] + revision = "4ead7f1b87048b7717b3e474a21fdc07e6bce636" + +[[projects]] + branch = "master" name = "github.com/lucas-clemente/aes12" packages = ["."] - pruneopts = "UT" revision = "cd47fb39b79f867c6e4e5cd39cf7abd799f71670" [[projects]] - digest = "1:e21e74a50aed0bc0dbb10b7855234f27f884835f59a4e88551de0899d5e2ae96" name = "github.com/lucas-clemente/quic-go" packages = [ ".", @@ -486,50 +480,40 @@ "internal/protocol", "internal/utils", "internal/wire", - "qerr", + "qerr" ] - pruneopts = "UT" revision = "714f38d5d0aff85894fd890718b991e361f03e7d" version = "v0.10.1" [[projects]] branch = "master" - digest = "1:08b3484b0665f09be0f1f699a38e877708453795df07a3d6e4cd41fa7a2cabf0" name = "github.com/lucas-clemente/quic-go-certificates" packages = ["."] - pruneopts = "UT" revision = "d2f86524cced5186554df90d92529757d22c1cb6" [[projects]] branch = "master" - digest = 
"1:df495c9184b4e6cbb9d55652236dbcbe72c65a1c8b6469da50722628cea474e7" name = "github.com/mailru/easyjson" packages = [ "buffer", "jlexer", - "jwriter", + "jwriter" ] - pruneopts = "UT" revision = "1de009706dbeb9d05f18586f0735fcdb7c524481" [[projects]] - branch = "master" - digest = "1:fc2b04b0069d6b10bdef96d278fe20c345794009685ed3c8c7f1a6dc023eefec" - name = "github.com/mattbaird/jsonpatch" + name = "github.com/markbates/inflect" packages = ["."] - pruneopts = "UT" - revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f" + revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6" + version = "v1.0.4" [[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] - pruneopts = "UT" revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" version = "v1.0.1" [[projects]] - digest = "1:dfc63b4d7037ed26f191cea82186bf14e604dee1c0e4245bda6711e5ab2e69ca" name = "github.com/mholt/caddy" packages = [ ".", @@ -569,181 +553,240 @@ "caddytls", "onevent", "onevent/hook", - "telemetry", + "telemetry" ] - pruneopts = "UT" revision = "80dfb8b2a7f89b120a627bc4d866a1dc5ed3d92f" version = "v0.11.5" [[projects]] branch = "master" - digest = "1:9c3b94fe155aa680cc04152667739d7f178ca6edd833b5b2fa7e5972f9f49ff3" name = "github.com/mholt/certmagic" packages = ["."] - pruneopts = "UT" revision = "e3e89d1096d76d61680f8eeb8f67649baa6c54b8" [[projects]] - digest = "1:4c195ae33dbe8df4eb3485f3357788f47b614547abcd048cbcc59d9025db6b8d" name = "github.com/miekg/dns" packages = ["."] - pruneopts = "UT" revision = "cc8cd02140663157ce797c6650488d6c8563f31f" version = "v1.1.6" [[projects]] - digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" name = "github.com/mitchellh/go-homedir" packages = ["."] - pruneopts = "UT" revision = "af06845cf3004701891bf4fdb884bfe4920b3727" version = "v1.1.0" [[projects]] - digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" name = "github.com/modern-go/concurrent" packages = ["."] - pruneopts = "UT" revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" version = "1.0.3" [[projects]] - digest = "1:c56ad36f5722eb07926c979d5e80676ee007a9e39e7808577b9d87ec92b00460" name = "github.com/modern-go/reflect2" packages = ["."] - pruneopts = "UT" - revision = "94122c33edd36123c84d5368cfb2b69df93a0ec8" - version = "v1.0.1" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" [[projects]] - digest = "1:b56c589214f01a5601e0821387db484617392d0042f26234bf2da853a2f498a1" name = "github.com/naoina/go-stringutil" packages = ["."] - pruneopts = "UT" revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b" version = "v0.1.0" [[projects]] - digest = "1:f58c3d0e46b64878d00652fedba24ee879725191ab919dca7b62586859281c04" name = "github.com/naoina/toml" packages = [ ".", - "ast", + "ast" ] - pruneopts = "UT" revision = "e6f5723bf2a66af014955e0888881314cf294129" version = "v0.1.1" [[projects]] - digest = "1:ee4d4af67d93cc7644157882329023ce9a7bcfce956a079069a9405521c7cc8d" + name = "github.com/onsi/ginkgo" + packages = [ + ".", + "config", + "internal/codelocation", + "internal/containernode", + "internal/failer", + "internal/leafnodes", + "internal/remote", + "internal/spec", + "internal/spec_iterator", + "internal/specrunner", + "internal/suite", + "internal/testingtproxy", + "internal/writer", + "reporters", + "reporters/stenographer", + "reporters/stenographer/support/go-colorable", + "reporters/stenographer/support/go-isatty", + "types" + ] + revision 
= "eea6ad008b96acdaa524f5b409513bf062b500ad" + version = "v1.8.0" + +[[projects]] + name = "github.com/onsi/gomega" + packages = [ + ".", + "format", + "gbytes", + "gexec", + "internal/assertion", + "internal/asyncassertion", + "internal/oraclematcher", + "internal/testingtsupport", + "matchers", + "matchers/support/goraph/bipartitegraph", + "matchers/support/goraph/edge", + "matchers/support/goraph/node", + "matchers/support/goraph/util", + "types" + ] + revision = "90e289841c1ed79b7a598a7cd9959750cb5e89e2" + version = "v1.5.0" + +[[projects]] name = "github.com/opencontainers/go-digest" packages = ["."] - pruneopts = "UT" revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf" version = "v1.0.0-rc1" [[projects]] - digest = "1:9072181164e616e422cbfbe48ca9ac249a4d76301ca0876c9f56b937cf214a2f" + name = "github.com/openshift/api" + packages = [ + "apps/v1", + "project/v1" + ] + revision = "0d921e363e951d89f583292c60d013c318df64dc" + version = "v3.9.0" + +[[projects]] name = "github.com/pborman/uuid" packages = ["."] - pruneopts = "UT" - revision = "ca53cad383cad2479bbba7f7a1a05797ec1386e4" + revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" + version = "v1.2" [[projects]] branch = "master" - digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" name = "github.com/petar/GoLLRB" packages = ["llrb"] - pruneopts = "UT" revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" [[projects]] - digest = "1:0e7775ebbcf00d8dd28ac663614af924411c868dca3d5aa762af0fae3808d852" name = "github.com/peterbourgon/diskv" packages = ["."] - pruneopts = "UT" revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" version = "v2.0.1" [[projects]] - digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747" name = "github.com/pkg/errors" packages = ["."] - pruneopts = "UT" - revision = "839d9e913e063e28dfd0e6c7b7512793e0a48be9" + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" [[projects]] - digest = "1:93a746f1060a8acbcf69344862b2ceced80f854170e1caae089b2834c5fbf7f4" name = "github.com/prometheus/client_golang" packages = [ + "api", + "api/prometheus/v1", "prometheus", "prometheus/internal", - "prometheus/promhttp", + "prometheus/promhttp" ] - pruneopts = "UT" revision = "505eaef017263e299324067d40ca2c48f6a2cf50" version = "v0.9.2" [[projects]] branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" name = "github.com/prometheus/client_model" packages = ["go"] - pruneopts = "UT" revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" [[projects]] - digest = "1:35cf6bdf68db765988baa9c4f10cc5d7dda1126a54bd62e252dbcd0b1fc8da90" name = "github.com/prometheus/common" packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", - "model", + "model" ] - pruneopts = "UT" revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250" version = "v0.2.0" [[projects]] branch = "master" - digest = "1:0573a48c92e778ad732779bf1e5064a5085ab3fbd236ad71e53cab5753ef3e6e" name = "github.com/prometheus/procfs" packages = [ ".", "internal/util", "iostats", "nfs", - "xfs", + "xfs" ] - pruneopts = "UT" revision = "e56f2e22fc761e82a34aca553f6725e2aff4fe6c" [[projects]] - digest = "1:b36a0ede02c4c2aef7df7f91cbbb7bb88a98b5d253509d4f997dda526e50c88c" + name = "github.com/rogpeppe/go-internal" + packages = [ + "modfile", + "module", + "semver" + ] + revision = 
"1cf9852c553c5b7da2d5a4a091129a7822fed0c9" + version = "v1.2.2" + +[[projects]] name = "github.com/russross/blackfriday" packages = ["."] - pruneopts = "UT" revision = "05f3235734ad95d0016f6a23902f06461fcf567a" version = "v1.5.2" [[projects]] - digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939" + name = "github.com/spf13/afero" + packages = [ + ".", + "mem" + ] + revision = "f4711e4db9e9a1d3887343acb72b2bbfc2f686f5" + version = "v1.2.1" + +[[projects]] name = "github.com/spf13/cobra" packages = ["."] - pruneopts = "UT" revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" version = "v0.0.3" [[projects]] - digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" name = "github.com/spf13/pflag" packages = ["."] - pruneopts = "UT" revision = "298182f68c66c05229eb03ac171abe6e309ee79a" version = "v1.0.3" [[projects]] - digest = "1:4ec126f1327fe6b348ccf2d4974c38ccf2628b10ad30be7b3be53f483db85149" + name = "github.com/stretchr/objx" + packages = ["."] + revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" + version = "v0.1.1" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "mock" + ] + revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053" + version = "v1.3.0" + +[[projects]] name = "github.com/xenolf/lego" packages = [ "acme", @@ -761,30 +804,24 @@ "lego", "log", "platform/wait", - "registration", + "registration" ] - pruneopts = "UT" revision = "2952cdaebd4da7cd560e195343bdd3cb78a67643" version = "v2.3.0" [[projects]] - digest = "1:777e729b475d3895c7229552aa10076f0d177daf37c0a72258006d046d329960" name = "go.uber.org/atomic" packages = ["."] - pruneopts = "UT" - revision = "4e336646b2ef9fc6e47be8e21594178f98e5ebcf" - version = "v1.2.0" + revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" + version = "v1.3.2" [[projects]] - digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" name = "go.uber.org/multierr" packages = ["."] - pruneopts = "UT" revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" version = "v1.1.0" [[projects]] - digest = "1:c52caf7bd44f92e54627a31b85baf06a68333a196b3d8d241480a774733dcf8b" name = "go.uber.org/zap" packages = [ ".", @@ -792,15 +829,13 @@ "internal/bufferpool", "internal/color", "internal/exit", - "zapcore", + "zapcore" ] - pruneopts = "UT" revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" version = "v1.9.1" [[projects]] branch = "master" - digest = "1:08c5a7f39562e590d344f565daead275a18635a2904deaed69a339ddb7e52abd" name = "golang.org/x/crypto" packages = [ "curve25519", @@ -809,19 +844,20 @@ "hkdf", "ocsp", "pbkdf2", - "ssh/terminal", + "ssh/terminal" ] - pruneopts = "UT" revision = "a1f597ede03a7bef967a422b5b3a5bd08805a01e" [[projects]] branch = "master" - digest = "1:0781624ec10916ea436c2c1425509012155709d151a4687331d140e7a5df79a8" name = "golang.org/x/net" packages = [ "bpf", "context", "context/ctxhttp", + "html", + "html/atom", + "html/charset", "http/httpguts", "http2", "http2/hpack", @@ -831,77 +867,93 @@ "internal/socks", "ipv4", "ipv6", - "proxy", + "proxy" ] - pruneopts = "UT" revision = "9f648a60d9775ef5c977e7669d1673a7a67bef33" [[projects]] - digest = "1:ad764db92ed977f803ff0f59a7a957bf65cc4e8ae9dfd08228e1f54ea40392e0" + branch = "master" name = "golang.org/x/oauth2" packages = [ ".", "google", "internal", "jws", - "jwt", + "jwt" ] - pruneopts = "UT" - revision = "a6bd8cefa1811bd24b86f8902872e4e8225f74c4" + revision = "e64efc72b421e893cbf63f17ba2221e7d6d0b0f3" [[projects]] branch = "master" - digest = 
"1:426092407910bee4a539be5d377abbaf629bae216c8eb2f80409c8cbe2647508" name = "golang.org/x/sys" packages = [ "cpu", "unix", - "windows", + "windows" ] - pruneopts = "UT" revision = "fead79001313d15903fb4605b4a1b781532cd93e" [[projects]] - digest = "1:0c56024909189aee3364b7f21a95a27459f718aa7c199a5c111c36cfffd9eaef" name = "golang.org/x/text" packages = [ "collate", "collate/build", + "encoding", + "encoding/charmap", + "encoding/htmlindex", + "encoding/internal", + "encoding/internal/identifier", + "encoding/japanese", + "encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", "internal/colltab", "internal/gen", "internal/tag", "internal/triegen", "internal/ucd", + "internal/utf8internal", "language", + "runes", "secure/bidirule", "transform", "unicode/bidi", "unicode/cldr", "unicode/norm", "unicode/rangetable", - "width", + "width" ] - pruneopts = "UT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] - digest = "1:d37b0ef2944431fe9e8ef35c6fffc8990d9e2ca300588df94a6890f3649ae365" + branch = "master" name = "golang.org/x/time" packages = ["rate"] - pruneopts = "UT" - revision = "f51c12702a4d776e4c1fa9b0fabab841babae631" + revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" [[projects]] branch = "master" - digest = "1:6bb2e1cbcf253307a88a531a75aa4d38fecb8d56a0dc9e837e288fe3189954f6" name = "golang.org/x/tools" - packages = ["container/intsets"] - pruneopts = "UT" + packages = [ + "container/intsets", + "go/ast/astutil", + "go/gcexportdata", + "go/internal/cgo", + "go/internal/gcimporter", + "go/internal/packagesdriver", + "go/packages", + "go/types/typeutil", + "imports", + "internal/fastwalk", + "internal/gopathwalk", + "internal/module", + "internal/semver" + ] revision = "8b67d361bba210f5fbb3c1a0fc121e0847b10b57" [[projects]] - digest = "1:7bc25c2efff76b31f146caf630c617be9b666c6164f0632050466fbec0500125" name = "google.golang.org/appengine" packages = [ ".", @@ -914,58 +966,59 @@ "internal/modules", "internal/remote_api", "internal/urlfetch", - "urlfetch", + "urlfetch" ] - pruneopts = "UT" revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1" version = "v1.4.0" [[projects]] - digest = "1:af07c44dc04418be522bfd4e21ca9130d58169ea084e3a883e23772003a381c4" name = "gopkg.in/asn1-ber.v1" packages = ["."] - pruneopts = "UT" revision = "f715ec2f112d1e4195b827ad68cf44017a3ef2b1" version = "v1.3" [[projects]] - digest = "1:ef72505cf098abdd34efeea032103377bec06abb61d8a06f002d5d296a4b1185" - name = "gopkg.in/inf.v0" + name = "gopkg.in/fsnotify.v1" packages = ["."] - pruneopts = "UT" - revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" - version = "v0.9.0" + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + source = "https://github.com/fsnotify/fsnotify.git" + version = "v1.4.7" + +[[projects]] + name = "gopkg.in/inf.v0" + packages = ["."] + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" [[projects]] - digest = "1:c805e517269b0ba4c21ded5836019ed7d16953d4026cb7d00041d039c7906be9" name = "gopkg.in/natefinch/lumberjack.v2" packages = ["."] - pruneopts = "UT" revision = "a96e63847dc3c67d17befa69c303767e2f84e54f" version = "v2.1" [[projects]] - digest = "1:c0c30f47f9c16f227ba82f0bdfd14fa968453c30b7677a07903b3b4f34b98d49" name = "gopkg.in/square/go-jose.v2" packages = [ ".", "cipher", - "json", + "json" ] - pruneopts = "UT" revision = "628223f44a71f715d2881ea69afc795a1e9c01be" version = "v2.3.0" [[projects]] - digest = 
"1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" + branch = "v1" + name = "gopkg.in/tomb.v1" + packages = ["."] + revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" + +[[projects]] name = "gopkg.in/yaml.v2" packages = ["."] - pruneopts = "UT" revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" version = "v2.2.2" [[projects]] - digest = "1:26a67eb988225c6a0600c1af0b35e795ac4d23a9c40a7aa178fa2adc0670f1f7" name = "k8s.io/api" packages = [ "admission/v1beta1", @@ -974,6 +1027,7 @@ "apps/v1", "apps/v1beta1", "apps/v1beta2", + "auditregistration/v1alpha1", "authentication/v1", "authentication/v1beta1", "authorization/v1", @@ -999,19 +1053,30 @@ "settings/v1alpha1", "storage/v1", "storage/v1alpha1", - "storage/v1beta1", + "storage/v1beta1" ] - pruneopts = "UT" - revision = "b503174bad5991eb66f18247f52e41c3258f6348" - version = "kubernetes-1.12.3" + revision = "05914d821849570fba9eacfb29466f2d8d3cd229" + version = "kubernetes-1.13.1" + +[[projects]] + name = "k8s.io/apiextensions-apiserver" + packages = [ + "pkg/apis/apiextensions", + "pkg/apis/apiextensions/v1beta1", + "pkg/client/clientset/clientset", + "pkg/client/clientset/clientset/scheme", + "pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + ] + revision = "0fe22c71c47604641d9aa352c785b7912c200562" + version = "kubernetes-1.13.1" [[projects]] - digest = "1:3c38a27df3152aa083018cb7a8d7b5bd5af5e808733ebbc6ae5b5fe10f8b0f84" name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", "pkg/api/meta", "pkg/api/resource", + "pkg/api/validation/path", "pkg/apis/meta/internalversion", "pkg/apis/meta/v1", "pkg/apis/meta/v1/unstructured", @@ -1052,26 +1117,23 @@ "pkg/version", "pkg/watch", "third_party/forked/golang/json", - "third_party/forked/golang/reflect", + "third_party/forked/golang/reflect" ] - pruneopts = "UT" - revision = "eddba98df674a16931d2d4ba75edc3a389bf633a" - version = "kubernetes-1.12.3" + revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd" + version = "kubernetes-1.13.0" [[projects]] - branch = "release-1.6" - digest = "1:0bb8d107f448b9499b72fe47a21b5e8c5defcc4cc3db07cae8ea5c28a57bd720" + branch = "master" name = "k8s.io/apiserver" packages = [ + "pkg/apis/audit", "pkg/authentication/user", "pkg/authorization/authorizer", - "pkg/endpoints/request", + "pkg/endpoints/request" ] - pruneopts = "UT" - revision = "0b01a50e7638172c5ce338ec9aeb7ffc3407c64a" + revision = "39e839dff03462945cc991fb9dac4469c951772d" [[projects]] - digest = "1:7b85a80c96c7294b896204fca216c35d10e63fd0a021b23c5b5fe00f68932420" name = "k8s.io/client-go" packages = [ "discovery", @@ -1084,6 +1146,8 @@ "informers/apps/v1", "informers/apps/v1beta1", "informers/apps/v1beta2", + "informers/auditregistration", + "informers/auditregistration/v1alpha1", "informers/autoscaling", "informers/autoscaling/v1", "informers/autoscaling/v2beta1", @@ -1127,6 +1191,7 @@ "kubernetes/typed/apps/v1", "kubernetes/typed/apps/v1beta1", "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/auditregistration/v1alpha1", "kubernetes/typed/authentication/v1", "kubernetes/typed/authentication/v1beta1", "kubernetes/typed/authorization/v1", @@ -1158,6 +1223,7 @@ "listers/apps/v1", "listers/apps/v1beta1", "listers/apps/v1beta2", + "listers/auditregistration/v1alpha1", "listers/autoscaling/v1", "listers/autoscaling/v2beta1", "listers/autoscaling/v2beta2", @@ -1211,111 +1277,145 @@ "util/integer", "util/jsonpath", "util/retry", - "util/workqueue", + "util/workqueue" ] - pruneopts = "UT" - revision = "d082d5923d3cc0bfbb066ee5fbdea3d0ca79acf8" - version = 
"kubernetes-1.12.3" + revision = "8d9ed539ba3134352c586810e749e58df4e94e4f" + version = "kubernetes-1.13.1" [[projects]] - digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f" + branch = "release-1.13" + name = "k8s.io/code-generator" + packages = [ + "cmd/client-gen", + "cmd/client-gen/args", + "cmd/client-gen/generators", + "cmd/client-gen/generators/fake", + "cmd/client-gen/generators/scheme", + "cmd/client-gen/generators/util", + "cmd/client-gen/path", + "cmd/client-gen/types", + "pkg/util" + ] + revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae" + +[[projects]] + branch = "master" + name = "k8s.io/gengo" + packages = [ + "args", + "examples/deepcopy-gen/generators", + "examples/defaulter-gen/generators", + "examples/set-gen/sets", + "generator", + "namer", + "parser", + "types" + ] + revision = "b90029ef6cd877cb3f422d75b3a07707e3aac6b7" + +[[projects]] + name = "k8s.io/klog" + packages = ["."] + revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0" + version = "v0.2.0" + +[[projects]] + branch = "master" name = "k8s.io/kube-openapi" packages = ["pkg/util/proto"] - pruneopts = "UT" - revision = "c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d" + revision = "15615b16d372105f0c69ff47dfe7402926a65aaa" [[projects]] - digest = "1:ceff1906e568c23d7a337fdda89900d6155aaf93db7cb6da73488ffb649641e1" name = "k8s.io/kubernetes" packages = ["pkg/util/slice"] - pruneopts = "UT" revision = "c27b913fddd1a6c480c229191a087698aa92f0b1" version = "v1.13.4" [[projects]] - digest = "1:08ac675dd3b2e69bbae58d4f82674833c0e1e66043cfe1dc235a4f6c874944c2" + branch = "master" + name = "sigs.k8s.io/application" + packages = ["pkg/apis/app/v1beta1"] + revision = "4ead7f1b87048b7717b3e474a21fdc07e6bce636" + +[[projects]] name = "sigs.k8s.io/controller-runtime" packages = [ "pkg/cache", "pkg/cache/internal", "pkg/client", "pkg/client/apiutil", + "pkg/client/config", + "pkg/controller", + "pkg/controller/controllerutil", + "pkg/envtest", + "pkg/envtest/printer", + "pkg/event", + "pkg/handler", + "pkg/internal/controller", + "pkg/internal/controller/metrics", "pkg/internal/recorder", "pkg/leaderelection", "pkg/manager", "pkg/metrics", "pkg/patch", + "pkg/predicate", + "pkg/reconcile", "pkg/recorder", "pkg/runtime/inject", "pkg/runtime/log", + "pkg/runtime/scheme", "pkg/runtime/signals", + "pkg/source", + "pkg/source/internal", + "pkg/webhook", "pkg/webhook/admission", "pkg/webhook/admission/types", + "pkg/webhook/internal/cert", + "pkg/webhook/internal/cert/generator", + "pkg/webhook/internal/cert/writer", + "pkg/webhook/internal/cert/writer/atomic", "pkg/webhook/internal/metrics", - "pkg/webhook/types", + "pkg/webhook/types" ] - pruneopts = "UT" - revision = "f6f0bc9611363b43664d08fb097ab13243ef621d" + revision = "12d98582e72927b6cd0123e2b4e819f9341ce62c" + version = "v0.1.10" + +[[projects]] + name = "sigs.k8s.io/controller-tools" + packages = [ + "cmd/controller-gen", + "pkg/crd/generator", + "pkg/crd/util", + "pkg/internal/codegen", + "pkg/internal/codegen/parse", + "pkg/internal/general", + "pkg/rbac", + "pkg/util", + "pkg/webhook", + "pkg/webhook/internal" + ] + revision = "fbf141159251d035089e7acdd5a343f8cec91b94" version = "v0.1.9" +[[projects]] + name = "sigs.k8s.io/testing_frameworks" + packages = [ + "integration", + "integration/addr", + "integration/internal" + ] + revision = "d348cb12705b516376e0c323bacca72b00a78425" + version = "v0.1.1" + +[[projects]] + name = "sigs.k8s.io/yaml" + packages = ["."] + revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" + version = "v1.1.0" + 
[solve-meta] analyzer-name = "dep" analyzer-version = 1 - input-imports = [ - "github.com/dgrijalva/jwt-go", - "github.com/docker/docker/api/types", - "github.com/docker/docker/client", - "github.com/emicklei/go-restful", - "github.com/emicklei/go-restful-openapi", - "github.com/go-ldap/ldap", - "github.com/go-openapi/spec", - "github.com/go-redis/redis", - "github.com/go-sql-driver/mysql", - "github.com/golang/glog", - "github.com/jinzhu/gorm", - "github.com/json-iterator/go", - "github.com/mholt/caddy", - "github.com/mholt/caddy/caddy/caddymain", - "github.com/mholt/caddy/caddyhttp/httpserver", - "github.com/mitchellh/go-homedir", - "github.com/spf13/cobra", - "github.com/spf13/pflag", - "golang.org/x/tools/container/intsets", - "gopkg.in/yaml.v2", - "k8s.io/api/apps/v1", - "k8s.io/api/apps/v1beta2", - "k8s.io/api/batch/v1", - "k8s.io/api/batch/v1beta1", - "k8s.io/api/core/v1", - "k8s.io/api/extensions/v1beta1", - "k8s.io/api/policy/v1beta1", - "k8s.io/api/rbac/v1", - "k8s.io/api/storage/v1", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/api/resource", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/runtime", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/apiserver/pkg/authorization/authorizer", - "k8s.io/apiserver/pkg/endpoints/request", - "k8s.io/client-go/informers", - "k8s.io/client-go/informers/core/v1", - "k8s.io/client-go/informers/rbac/v1", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/scheme", - "k8s.io/client-go/plugin/pkg/client/auth/gcp", - "k8s.io/client-go/rest", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/clientcmd", - "k8s.io/client-go/util/workqueue", - "k8s.io/kubernetes/pkg/util/slice", - "sigs.k8s.io/controller-runtime/pkg/manager", - "sigs.k8s.io/controller-runtime/pkg/runtime/log", - "sigs.k8s.io/controller-runtime/pkg/runtime/signals", - ] + inputs-digest = "9c1d98a69374a4d7fbddc9df92f3ba83c5d4a0753b887577c161b2802f9c9494" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index d7822842e..1194798a1 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -1,93 +1,68 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - +required = [ + "github.com/emicklei/go-restful", + "github.com/onsi/ginkgo", # for test framework + "github.com/onsi/gomega", # for test matchers + "k8s.io/gengo/examples/defaulter-gen/generators", + "k8s.io/gengo/examples/deepcopy-gen/generators", + "k8s.io/client-go/plugin/pkg/client/auth/gcp", # for development against gcp + "k8s.io/code-generator/cmd/client-gen", + "sigs.k8s.io/controller-tools/cmd/controller-gen", # for crd/rbac generation + "sigs.k8s.io/controller-runtime/pkg/client/config", + "sigs.k8s.io/controller-runtime/pkg/controller", + "sigs.k8s.io/controller-runtime/pkg/handler", + "sigs.k8s.io/controller-runtime/pkg/manager", + "sigs.k8s.io/controller-runtime/pkg/runtime/signals", + "sigs.k8s.io/controller-runtime/pkg/source", + "sigs.k8s.io/testing_frameworks/integration", # for integration testing + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", +] [[constraint]] name = "github.com/docker/docker" version = "v17.05.0-ce" -[[constraint]] - name = "github.com/emicklei/go-restful" - version = "2.7.1" - -[[constraint]] - name = "github.com/emicklei/go-restful-swagger12" - version = "1.0.1" - -[[constraint]] - branch = "master" - name = "github.com/golang/glog" - -[[constraint]] - name = "github.com/spf13/pflag" - version = "1.0.1" +[[override]] + name = "k8s.io/api" + version = "kubernetes-1.13.1" [[override]] - name = "gopkg.in/igm/sockjs-go.v2" - version = "2.0.0" - -[[constraint]] - name = "gopkg.in/yaml.v2" - version = "2.2.1" - -[[constraint]] - name = "k8s.io/api" - version = "kubernetes-1.12.3" - -[[constraint]] name = "k8s.io/apimachinery" - version = "kubernetes-1.12.3" + version = "kubernetes-1.13.1" [[constraint]] + name = "k8s.io/code-generator" + version = "kubernetes-1.13.1" + +[[override]] name = "k8s.io/client-go" - version = "kubernetes-1.12.3" + version = "kubernetes-1.13.1" [[constraint]] name = "k8s.io/kubernetes" - version = "1.12.3" + version = "1.13.1" [[constraint]] - name = "github.com/mholt/caddy" - version = "v0.11.5" + name="sigs.k8s.io/controller-runtime" + version="v0.1.7" [[constraint]] - name = "sigs.k8s.io/controller-runtime" - version = "v0.1.9" + name="sigs.k8s.io/controller-tools" + version="v0.1.7" [[override]] - name = "sigs.k8s.io/controller-tools" - version = "v0.1.9" - -[[override]] - name = "k8s.io/component-base" - branch = "master" + name="github.com/bifurcation/mint" + revision="824af65410658916142a7600349144e1289f2110" [prune] go-tests = true unused-packages = true + non-go = true + +[[prune.project]] + name = "k8s.io/code-generator" + unused-packages = false + non-go = false + # To use reference package: # vendor/github.com/docker/docker/client/container_commit.go:17: undefined: reference.ParseNormalizedNamed @@ -106,20 +81,24 @@ name = "github.com/docker/go-connections" branch = "master" -# To use reference package: -# vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go:104:3: unknown field 'CaseSensitive' in struct literal of type jsoniter.Config -[[override]] - name = "github.com/json-iterator/go" - branch = "master" - - # For 
dependency below: Refer to issue https://github.com/golang/dep/issues/1799
 [[override]]
   name = "gopkg.in/fsnotify.v1"
   source = "https://github.com/fsnotify/fsnotify.git"
   version = "v1.4.7"
-
 [[override]]
   name = "github.com/russross/blackfriday"
-  version = "v1.5.2"
\ No newline at end of file
+  version = "v1.5.2"
+
+[[constraint]]
+  branch = "master"
+  name = "sigs.k8s.io/application"
+
+[[constraint]]
+  name = "github.com/kiali/kiali"
+  version = "0.15.0"
+
+[[constraint]]
+  name = "github.com/gorilla/mux"
+  version = "1.7.0"
diff --git a/Makefile b/Makefile
index a35477c12..394b1bd17 100644
--- a/Makefile
+++ b/Makefile
@@ -8,18 +8,6 @@ BIN ?= ks-apiserver
 IMG ?= kubespheredev/ks-apiserver
 OUTPUT_DIR=bin
 
-define get_diff_files
-	$(eval DIFF_FILES=$(shell git diff --name-only --diff-filter=ad | grep -E "^(test|cmd|pkg)/.+\.go"))
-endef
-define get_build_flags
-	$(eval SHORT_VERSION=$(shell git describe --tags --always --dirty="-dev"))
-	$(eval SHA1_VERSION=$(shell git show --quiet --pretty=format:%H))
-	$(eval DATE=$(shell date +'%Y-%m-%dT%H:%M:%S'))
-	$(eval BUILD_FLAG= -X $(TRAG.Version).ShortVersion="$(SHORT_VERSION)" \
-		-X $(TRAG.Version).GitSha1Version="$(SHA1_VERSION)" \
-		-X $(TRAG.Version).BuildDate="$(DATE)")
-endef
-
 define ALL_HELP_INFO
 # Build code.
 #
@@ -62,6 +50,18 @@ fmt:
 vet:
 	go vet ./pkg/... ./cmd/...
 
+# Generate manifests e.g. CRD, RBAC etc.
+manifests:
+	go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go all
+
+deploy: manifests
+	kubectl apply -f config/crds
+	kustomize build config/default | kubectl apply -f -
+
+# Generate DeepCopy to implement runtime.Object
+deepcopy:
+	./vendor/k8s.io/code-generator/generate-groups.sh deepcopy kubesphere.io/kubesphere/pkg/client kubesphere.io/kubesphere/pkg/apis "servicemesh:v1alpha2"
+
 # Generate code
 generate:
 ifndef GOPATH
@@ -75,7 +75,7 @@ docker-build: all
 
 # Run tests
 test: generate fmt vet
-	go test ./pkg/... ./cmd/... -coverprofile cover.out
+	export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=1m; go test ./pkg/... ./cmd/... -coverprofile cover.out
 
 .PHONY: clean
 clean:
diff --git a/build/controller-manager/Dockerfile b/build/controller-manager/Dockerfile
new file mode 100644
index 000000000..a655999ba
--- /dev/null
+++ b/build/controller-manager/Dockerfile
@@ -0,0 +1,18 @@
+# Copyright 2018 The KubeSphere Authors. All rights reserved.
+# Use of this source code is governed by an Apache license
+# that can be found in the LICENSE file.
+
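+# Multi-stage build: the golang stage compiles a statically linked
+# controller-manager binary, and only that binary is copied into the
+# minimal Alpine runtime image below.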
+FROM golang:1.10.3 as controller-manager-builder + +COPY / /go/src/kubesphere.io/kubesphere +WORKDIR /go/src/kubesphere.io/kubesphere + +RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build --ldflags "-extldflags -static" -o controller-manager ./cmd/controller-manager/ + +FROM alpine:3.7 +RUN apk add --update ca-certificates && update-ca-certificates +COPY --from=controller-manager-builder /go/src/kubesphere.io/kubesphere/controller-manager /usr/local/bin/ +CMD controller-manager diff --git a/cmd/controller-manager/controller-manager.go b/cmd/controller-manager/controller-manager.go index f18095d01..7ce0805fc 100644 --- a/cmd/controller-manager/controller-manager.go +++ b/cmd/controller-manager/controller-manager.go @@ -20,11 +20,14 @@ package main import ( "flag" + + "os" + "sigs.k8s.io/application/pkg/apis/app/v1beta1" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "kubesphere.io/kubesphere/pkg/informers" "kubesphere.io/kubesphere/pkg/simple/client/k8s" "kubesphere.io/kubesphere/pkg/simple/controller/namespace" - "os" "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" @@ -66,6 +69,13 @@ func main() { os.Exit(1) } + log.Info("Print all known types") + for k, v := range mgr.GetScheme().AllKnownTypes() { + if k.Group == v1beta1.SchemeGroupVersion.Group { + log.Info(k.String() + " / " + v.String()) + } + } + // Setup all Controllers log.Info("Setting up controller") if err := controller.AddToManager(mgr); err != nil { diff --git a/cmd/ks-apiserver/apiserver.go b/cmd/ks-apiserver/apiserver.go index 38548483c..623c22782 100644 --- a/cmd/ks-apiserver/apiserver.go +++ b/cmd/ks-apiserver/apiserver.go @@ -25,6 +25,7 @@ import ( _ "kubesphere.io/kubesphere/pkg/apis/monitoring/install" _ "kubesphere.io/kubesphere/pkg/apis/operations/install" _ "kubesphere.io/kubesphere/pkg/apis/resources/install" + _ "kubesphere.io/kubesphere/pkg/apis/servicemesh/metrics/install" ) func main() { diff --git a/cmd/ks-apiserver/app/options/options.go b/cmd/ks-apiserver/app/options/options.go new file mode 100644 index 000000000..62e41ac1c --- /dev/null +++ b/cmd/ks-apiserver/app/options/options.go @@ -0,0 +1,34 @@ +package options + +import ( + "github.com/spf13/pflag" + genericoptions "kubesphere.io/kubesphere/pkg/options" +) + +type ServerRunOptions struct { + GenericServerRunOptions *genericoptions.ServerRunOptions + + // istio pilot discovery service url + IstioPilotServiceURL string + OpenPitrixServer string + OpenPitrixProxyToken string +} + +func NewServerRunOptions() *ServerRunOptions { + + s := ServerRunOptions{ + GenericServerRunOptions: genericoptions.NewServerRunOptions(), + IstioPilotServiceURL: "http://istio-pilot.istio-system.svc:8080/version", + } + + return &s +} + +func (s *ServerRunOptions) AddFlags(fs *pflag.FlagSet) { + + s.GenericServerRunOptions.AddFlags(fs) + + fs.StringVar(&s.IstioPilotServiceURL, "istio-pilot-service-url", "http://istio-pilot.istio-system.svc:8080/version", "istio pilot discovery service url") + fs.StringVar(&s.OpenPitrixServer, "openpitrix-server", "http://openpitrix-api-gateway.openpitrix-system.svc", "openpitrix server") + fs.StringVar(&s.OpenPitrixProxyToken, "openpitrix-proxy-token", "", "openpitrix proxy token") +} diff --git a/cmd/ks-apiserver/app/server.go b/cmd/ks-apiserver/app/server.go index 119eaadb3..7bf170b90 100644 --- a/cmd/ks-apiserver/app/server.go +++ b/cmd/ks-apiserver/app/server.go @@ -21,18 +21,23 @@ import ( goflag "flag" "fmt" "github.com/golang/glog" 
+ kconfig "github.com/kiali/kiali/config" "github.com/spf13/cobra" + "github.com/spf13/pflag" + "kubesphere.io/kubesphere/cmd/ks-apiserver/app/options" "kubesphere.io/kubesphere/pkg/apiserver/runtime" "kubesphere.io/kubesphere/pkg/filter" "kubesphere.io/kubesphere/pkg/informers" - "kubesphere.io/kubesphere/pkg/options" + "kubesphere.io/kubesphere/pkg/models/applications" "kubesphere.io/kubesphere/pkg/signals" + "kubesphere.io/kubesphere/pkg/simple/client/prometheus" "log" "net/http" + "net/url" ) func NewAPIServerCommand() *cobra.Command { - s := options.SharedOptions + s := options.NewServerRunOptions() cmd := &cobra.Command{ Use: "ks-apiserver", @@ -44,14 +49,22 @@ cluster's shared state through which all other components interact.`, }, } - cmd.Flags().AddFlagSet(s.CommandLine) + s.AddFlags(cmd.Flags()) cmd.Flags().AddGoFlagSet(goflag.CommandLine) glog.CopyStandardLogTo("INFO") + return cmd } func Run(s *options.ServerRunOptions) error { + pflag.VisitAll(func(flag *pflag.Flag) { + log.Printf("FLAG: --%s=%q", flag.Name, flag.Value) + }) + + applications.OpenPitrixServer = s.OpenPitrixServer + applications.OpenPitrixProxyToken = s.OpenPitrixProxyToken + var err error waitForResourceSync() @@ -59,19 +72,55 @@ func Run(s *options.ServerRunOptions) error { container := runtime.Container container.Filter(filter.Logging) - log.Printf("Server listening on %d.", s.InsecurePort) + log.Printf("Server listening on %d.", s.GenericServerRunOptions.InsecurePort) - if s.InsecurePort != 0 { - err = http.ListenAndServe(fmt.Sprintf("%s:%d", s.BindAddress, s.InsecurePort), container) + for _, webservice := range container.RegisteredWebServices() { + for _, route := range webservice.Routes() { + log.Printf(route.Path) + } } - if s.SecurePort != 0 && len(s.TlsCertFile) > 0 && len(s.TlsPrivateKey) > 0 { - err = http.ListenAndServeTLS(fmt.Sprintf("%s:%d", s.BindAddress, s.SecurePort), s.TlsCertFile, s.TlsPrivateKey, container) + initializeKialiConfig(s) + + if s.GenericServerRunOptions.InsecurePort != 0 { + err = http.ListenAndServe(fmt.Sprintf("%s:%d", s.GenericServerRunOptions.BindAddress, s.GenericServerRunOptions.InsecurePort), container) + } + + if s.GenericServerRunOptions.SecurePort != 0 && len(s.GenericServerRunOptions.TlsCertFile) > 0 && len(s.GenericServerRunOptions.TlsPrivateKey) > 0 { + err = http.ListenAndServeTLS(fmt.Sprintf("%s:%d", s.GenericServerRunOptions.BindAddress, s.GenericServerRunOptions.SecurePort), s.GenericServerRunOptions.TlsCertFile, s.GenericServerRunOptions.TlsPrivateKey, container) } return err } +func initializeKialiConfig(s *options.ServerRunOptions) { + // Initialize kiali config + config := kconfig.NewConfig() + + // Exclude system namespaces + config.API.Namespaces.Exclude = []string{"istio-system", "kubesphere*", "kube*"} + config.InCluster = false + + // Set default prometheus service url + config.ExternalServices.PrometheusServiceURL = "http://prometheus.kubesphere-monitoring-system.svc:9090" + + // ugly hack to get prometheus service url + if pflag.Parsed() && pflag.Lookup("prometheus-endpoint") != nil { + // Set prometheus + endpoint, err := url.Parse(prometheus.PrometheusAPIEndpoint) + if err != nil { + config.ExternalServices.PrometheusServiceURL = endpoint.Path + } + } + + config.ExternalServices.PrometheusCustomMetricsURL = config.ExternalServices.PrometheusServiceURL + + // Set istio pilot discovery service url + config.ExternalServices.Istio.UrlServiceVersion = s.IstioPilotServiceURL + + kconfig.Set(config) +} + func waitForResourceSync() { stopChan := 
signals.SetupSignalHandler() @@ -96,6 +145,7 @@ func waitForResourceSync() { informerFactory.Apps().V1().StatefulSets().Lister() informerFactory.Apps().V1().Deployments().Lister() informerFactory.Apps().V1().DaemonSets().Lister() + informerFactory.Apps().V1().ReplicaSets().Lister() informerFactory.Batch().V1().Jobs().Lister() informerFactory.Batch().V1beta1().CronJobs().Lister() diff --git a/cmd/ks-iam/app/options/options.go b/cmd/ks-iam/app/options/options.go new file mode 100644 index 000000000..4fd808f7c --- /dev/null +++ b/cmd/ks-iam/app/options/options.go @@ -0,0 +1,27 @@ +package options + +import ( + "github.com/spf13/pflag" + genericoptions "kubesphere.io/kubesphere/pkg/options" +) + +type ServerRunOptions struct { + GenericServerRunOptions *genericoptions.ServerRunOptions + AdminEmail string + AdminPassword string + TokenExpireTime string +} + +func NewServerRunOptions() *ServerRunOptions { + s := &ServerRunOptions{ + GenericServerRunOptions: genericoptions.NewServerRunOptions(), + } + return s +} + +func (s *ServerRunOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&s.AdminEmail, "admin-email", "admin@kubesphere.io", "default administrator's email") + fs.StringVar(&s.AdminPassword, "admin-password", "passw0rd", "default administrator's password") + fs.StringVar(&s.TokenExpireTime, "token-expire-time", "24h", "token expire time") + s.GenericServerRunOptions.AddFlags(fs) +} diff --git a/cmd/ks-iam/app/server.go b/cmd/ks-iam/app/server.go index 14a8f3f4f..24a9de24d 100644 --- a/cmd/ks-iam/app/server.go +++ b/cmd/ks-iam/app/server.go @@ -22,18 +22,20 @@ import ( "fmt" "github.com/golang/glog" "github.com/spf13/cobra" + "github.com/spf13/pflag" + "kubesphere.io/kubesphere/cmd/ks-iam/app/options" "kubesphere.io/kubesphere/pkg/apiserver/runtime" "kubesphere.io/kubesphere/pkg/filter" "kubesphere.io/kubesphere/pkg/informers" "kubesphere.io/kubesphere/pkg/models/iam" - "kubesphere.io/kubesphere/pkg/options" "kubesphere.io/kubesphere/pkg/signals" "log" "net/http" + "time" ) func NewAPIServerCommand() *cobra.Command { - s := options.SharedOptions + s := options.NewServerRunOptions() cmd := &cobra.Command{ Use: "ks-iam", @@ -44,17 +46,29 @@ cluster's shared state through which all other components interact.`, return Run(s) }, } - cmd.Flags().AddFlagSet(s.CommandLine) + s.AddFlags(cmd.Flags()) cmd.Flags().AddGoFlagSet(goflag.CommandLine) glog.CopyStandardLogTo("INFO") + return cmd } func Run(s *options.ServerRunOptions) error { + pflag.VisitAll(func(flag *pflag.Flag) { + log.Printf("FLAG: --%s=%q", flag.Name, flag.Value) + }) + var err error - err = iam.DatabaseInit() + + expireTime, err := time.ParseDuration(s.TokenExpireTime) + + if err != nil { + return err + } + + err = iam.Init(s.AdminEmail, s.AdminPassword, expireTime) if err != nil { return err @@ -65,14 +79,12 @@ func Run(s *options.ServerRunOptions) error { container := runtime.Container container.Filter(filter.Logging) - log.Printf("Server listening on %d.", s.InsecurePort) - - if s.InsecurePort != 0 { - err = http.ListenAndServe(fmt.Sprintf("%s:%d", s.BindAddress, s.InsecurePort), container) + if s.GenericServerRunOptions.InsecurePort != 0 { + err = http.ListenAndServe(fmt.Sprintf("%s:%d", s.GenericServerRunOptions.BindAddress, s.GenericServerRunOptions.InsecurePort), container) } - if s.SecurePort != 0 && len(s.TlsCertFile) > 0 && len(s.TlsPrivateKey) > 0 { - err = http.ListenAndServeTLS(fmt.Sprintf("%s:%d", s.BindAddress, s.SecurePort), s.TlsCertFile, s.TlsPrivateKey, container) + if s.GenericServerRunOptions.SecurePort != 0 
&& len(s.GenericServerRunOptions.TlsCertFile) > 0 && len(s.GenericServerRunOptions.TlsPrivateKey) > 0 { + err = http.ListenAndServeTLS(fmt.Sprintf("%s:%d", s.GenericServerRunOptions.BindAddress, s.GenericServerRunOptions.SecurePort), s.GenericServerRunOptions.TlsCertFile, s.GenericServerRunOptions.TlsPrivateKey, container) } return err diff --git a/config/crds/app_v1beta1_application.yaml b/config/crds/app_v1beta1_application.yaml new file mode 100644 index 000000000..6851fc7d3 --- /dev/null +++ b/config/crds/app_v1beta1_application.yaml @@ -0,0 +1,239 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: applications.app.k8s.io +spec: + group: app.k8s.io + names: + kind: Application + plural: applications + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + assemblyPhase: + type: string + componentKinds: + items: + type: object + type: array + descriptor: + properties: + description: + type: string + icons: + items: + properties: + size: + type: string + src: + type: string + type: + type: string + required: + - src + type: object + type: array + keywords: + items: + type: string + type: array + links: + items: + properties: + description: + type: string + url: + type: string + type: object + type: array + maintainers: + items: + properties: + email: + type: string + name: + type: string + url: + type: string + type: object + type: array + notes: + type: string + owners: + items: + properties: + email: + type: string + name: + type: string + url: + type: string + type: object + type: array + type: + type: string + version: + type: string + type: object + info: + items: + properties: + name: + type: string + type: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + key: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + resourceVersion: + type: string + uid: + type: string + type: object + ingressRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + host: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + path: + type: string + resourceVersion: + type: string + uid: + type: string + type: object + secretKeyRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + key: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + resourceVersion: + type: string + uid: + type: string + type: object + serviceRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + path: + type: string + port: + format: int32 + type: integer + resourceVersion: + type: string + uid: + type: string + type: object + type: + type: string + type: object + type: object + type: array + selector: + type: object + type: object + status: + properties: + components: + items: + properties: + group: + type: string + kind: + type: string + link: + type: string + name: + type: string + status: + type: string + type: object + type: array + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + lastUpdateTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: 
string + type: + type: string + required: + - type + - status + type: object + type: array + observedGeneration: + format: int64 + type: integer + type: object + version: v1beta1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crds/istio-crds.yaml b/config/crds/istio-crds.yaml new file mode 100644 index 000000000..acdf5392b --- /dev/null +++ b/config/crds/istio-crds.yaml @@ -0,0 +1,1116 @@ +# {{ if or .Values.global.crds (semverCompare ">=2.10.0-0" .Capabilities.TillerVersion.SemVer) }} +# these CRDs only make sense when pilot is enabled +# {{- if .Values.pilot.enabled }} +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: virtualservices.networking.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: istio-pilot +spec: + group: networking.istio.io + names: + kind: VirtualService + listKind: VirtualServiceList + plural: virtualservices + singular: virtualservice + categories: + - istio-io + - networking-istio-io + scope: Namespaced + version: v1alpha3 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: destinationrules.networking.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: istio-pilot +spec: + group: networking.istio.io + names: + kind: DestinationRule + listKind: DestinationRuleList + plural: destinationrules + singular: destinationrule + categories: + - istio-io + - networking-istio-io + scope: Namespaced + version: v1alpha3 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: serviceentries.networking.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: istio-pilot +spec: + group: networking.istio.io + names: + kind: ServiceEntry + listKind: ServiceEntryList + plural: serviceentries + singular: serviceentry + categories: + - istio-io + - networking-istio-io + scope: Namespaced + version: v1alpha3 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: gateways.networking.istio.io + annotations: + "helm.sh/hook": crd-install + "helm.sh/hook-weight": "-5" + labels: + app: istio-pilot +spec: + group: networking.istio.io + names: + kind: Gateway + plural: gateways + singular: gateway + categories: + - istio-io + - networking-istio-io + scope: Namespaced + version: v1alpha3 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: envoyfilters.networking.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: istio-pilot +spec: + group: networking.istio.io + names: + kind: EnvoyFilter + plural: envoyfilters + singular: envoyfilter + categories: + - istio-io + - networking-istio-io + scope: Namespaced + version: v1alpha3 +--- +# {{- end }} + +# these CRDs only make sense when security is enabled +# {{- if .Values.security.enabled }} +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + annotations: + "helm.sh/hook": crd-install + name: policies.authentication.istio.io +spec: + group: authentication.istio.io + names: + kind: Policy + plural: policies + singular: policy + categories: + - istio-io + - authentication-istio-io + scope: Namespaced + version: v1alpha1 +--- +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + annotations: + "helm.sh/hook": crd-install + name: meshpolicies.authentication.istio.io +spec: + group: authentication.istio.io + names: + kind: MeshPolicy + 
listKind: MeshPolicyList + plural: meshpolicies + singular: meshpolicy + categories: + - istio-io + - authentication-istio-io + scope: Cluster + version: v1alpha1 +--- +# {{- end }} + +# {{- if .Values.mixer.enabled }} +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + annotations: + "helm.sh/hook": crd-install + name: httpapispecbindings.config.istio.io +spec: + group: config.istio.io + names: + kind: HTTPAPISpecBinding + plural: httpapispecbindings + singular: httpapispecbinding + categories: + - istio-io + - apim-istio-io + scope: Namespaced + version: v1alpha2 +--- +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + annotations: + "helm.sh/hook": crd-install + name: httpapispecs.config.istio.io +spec: + group: config.istio.io + names: + kind: HTTPAPISpec + plural: httpapispecs + singular: httpapispec + categories: + - istio-io + - apim-istio-io + scope: Namespaced + version: v1alpha2 +--- +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + annotations: + "helm.sh/hook": crd-install + name: quotaspecbindings.config.istio.io +spec: + group: config.istio.io + names: + kind: QuotaSpecBinding + plural: quotaspecbindings + singular: quotaspecbinding + categories: + - istio-io + - apim-istio-io + scope: Namespaced + version: v1alpha2 +--- +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + annotations: + "helm.sh/hook": crd-install + name: quotaspecs.config.istio.io +spec: + group: config.istio.io + names: + kind: QuotaSpec + plural: quotaspecs + singular: quotaspec + categories: + - istio-io + - apim-istio-io + scope: Namespaced + version: v1alpha2 +--- + +# Mixer CRDs +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: rules.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: istio.io.mixer + istio: core +spec: + group: config.istio.io + names: + kind: rule + plural: rules + singular: rule + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: attributemanifests.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: istio.io.mixer + istio: core +spec: + group: config.istio.io + names: + kind: attributemanifest + plural: attributemanifests + singular: attributemanifest + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: bypasses.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: bypass + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: bypass + plural: bypasses + singular: bypass + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: circonuses.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: circonus + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: circonus + plural: circonuses + singular: circonus + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: deniers.config.istio.io + 
annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: denier + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: denier + plural: deniers + singular: denier + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: fluentds.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: fluentd + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: fluentd + plural: fluentds + singular: fluentd + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: kubernetesenvs.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: kubernetesenv + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: kubernetesenv + plural: kubernetesenvs + singular: kubernetesenv + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: listcheckers.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: listchecker + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: listchecker + plural: listcheckers + singular: listchecker + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: memquotas.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: memquota + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: memquota + plural: memquotas + singular: memquota + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: noops.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: noop + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: noop + plural: noops + singular: noop + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: opas.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: opa + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: opa + plural: opas + singular: opa + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: prometheuses.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: prometheus + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: prometheus + plural: prometheuses + singular: prometheus + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: rbacs.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: rbac + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: rbac + plural: rbacs + 
singular: rbac + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: redisquotas.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + package: redisquota + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: redisquota + plural: redisquotas + singular: redisquota + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: servicecontrols.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: servicecontrol + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: servicecontrol + plural: servicecontrols + singular: servicecontrol + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 + +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: signalfxs.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: signalfx + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: signalfx + plural: signalfxs + singular: signalfx + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: solarwindses.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: solarwinds + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: solarwinds + plural: solarwindses + singular: solarwinds + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: stackdrivers.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: stackdriver + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: stackdriver + plural: stackdrivers + singular: stackdriver + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: statsds.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: statsd + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: statsd + plural: statsds + singular: statsd + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: stdios.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: stdio + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: stdio + plural: stdios + singular: stdio + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: apikeys.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: apikey + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: apikey + plural: apikeys + singular: apikey + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + 
name: authorizations.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: authorization + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: authorization + plural: authorizations + singular: authorization + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: checknothings.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: checknothing + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: checknothing + plural: checknothings + singular: checknothing + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: kuberneteses.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: adapter.template.kubernetes + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: kubernetes + plural: kuberneteses + singular: kubernetes + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: listentries.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: listentry + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: listentry + plural: listentries + singular: listentry + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: logentries.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: logentry + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: logentry + plural: logentries + singular: logentry + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: edges.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: edge + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: edge + plural: edges + singular: edge + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: metrics.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: metric + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: metric + plural: metrics + singular: metric + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: quotas.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: quota + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: quota + plural: quotas + singular: quota + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: reportnothings.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: 
reportnothing + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: reportnothing + plural: reportnothings + singular: reportnothing + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: servicecontrolreports.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: servicecontrolreport + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: servicecontrolreport + plural: servicecontrolreports + singular: servicecontrolreport + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: tracespans.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: tracespan + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: tracespan + plural: tracespans + singular: tracespan + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: rbacconfigs.rbac.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: istio.io.mixer + istio: rbac +spec: + group: rbac.istio.io + names: + kind: RbacConfig + plural: rbacconfigs + singular: rbacconfig + categories: + - istio-io + - rbac-istio-io + scope: Namespaced + version: v1alpha1 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: serviceroles.rbac.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: istio.io.mixer + istio: rbac +spec: + group: rbac.istio.io + names: + kind: ServiceRole + plural: serviceroles + singular: servicerole + categories: + - istio-io + - rbac-istio-io + scope: Namespaced + version: v1alpha1 +--- + +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: servicerolebindings.rbac.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: istio.io.mixer + istio: rbac +spec: + group: rbac.istio.io + names: + kind: ServiceRoleBinding + plural: servicerolebindings + singular: servicerolebinding + categories: + - istio-io + - rbac-istio-io + scope: Namespaced + version: v1alpha1 +--- +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: adapters.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: adapter + istio: mixer-adapter +spec: + group: config.istio.io + names: + kind: adapter + plural: adapters + singular: adapter + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: instances.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: instance + istio: mixer-instance +spec: + group: config.istio.io + names: + kind: instance + plural: instances + singular: instance + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: templates.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: template + istio: mixer-template +spec: + group: 
config.istio.io + names: + kind: template + plural: templates + singular: template + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- +kind: CustomResourceDefinition +apiVersion: apiextensions.k8s.io/v1beta1 +metadata: + name: handlers.config.istio.io + annotations: + "helm.sh/hook": crd-install + labels: + app: mixer + package: handler + istio: mixer-handler +spec: + group: config.istio.io + names: + kind: handler + plural: handlers + singular: handler + categories: + - istio-io + - policy-istio-io + scope: Namespaced + version: v1alpha2 +--- +# {{- end }} +# {{ end }} \ No newline at end of file diff --git a/config/crds/istio_v1alpha3_destinationrule.yaml b/config/crds/istio_v1alpha3_destinationrule.yaml new file mode 100644 index 000000000..a20b44af8 --- /dev/null +++ b/config/crds/istio_v1alpha3_destinationrule.yaml @@ -0,0 +1,763 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: destinationrules.istio.kubesphere.io +spec: + group: istio.kubesphere.io + names: + kind: DestinationRule + plural: destinationrules + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + host: + description: 'REQUIRED. The name of a service from the service registry. + Service names are looked up from the platform''s service registry + (e.g., Kubernetes services, Consul services, etc.) and from the hosts + declared by [ServiceEntries](#ServiceEntry). Rules defined for services + that do not exist in the service registry will be ignored. *Note + for Kubernetes users*: When short names are used (e.g. "reviews" instead + of "reviews.default.svc.cluster.local"), Istio will interpret the + short name based on the namespace of the rule, not the service. A + rule in the "default" namespace containing a host "reviews will be + interpreted as "reviews.default.svc.cluster.local", irrespective of + the actual namespace associated with the reviews service. _To avoid + potential misconfigurations, it is recommended to always use fully + qualified domain names over short names._ Note that the host field + applies to both HTTP and TCP services.' + type: string + subsets: + description: One or more named sets that represent individual versions + of a service. Traffic policies can be overridden at subset level. + items: + properties: + labels: + description: REQUIRED. Labels apply a filter over the endpoints + of a service in the service registry. See route rules for examples + of usage. + type: object + name: + description: REQUIRED. Name of the subset. The service name and + the subset name can be used for traffic splitting in a route + rule. + type: string + trafficPolicy: + description: Traffic policies that apply to this subset. 
Subsets + inherit the traffic policies specified at the DestinationRule + level. Settings specified at the subset level will override + the corresponding settings specified at the DestinationRule + level. + properties: + connectionPool: + description: Settings controlling the volume of connections + to an upstream service + properties: + http: + description: HTTP connection pool settings. + properties: + maxRequestsPerConnection: + description: Maximum number of requests per connection + to a backend. Setting this parameter to 1 disables + keep alive. + format: int32 + type: integer + maxRetries: + description: Maximum number of retries that can be + outstanding to all hosts in a cluster at a given + time. Defaults to 3. + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP and TCP upstream + connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer algorithms. + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: REQUIRED. Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: REQUIRED. Lifetime of the cookie. + type: string + required: + - name + - ttl + type: object + httpHeaderName: + description: 'It is required to specify exactly one + of the fields as hash key: HttpHeaderName, HttpCookie, + or UseSourceIP. Hash based on a specific HTTP header.' + type: string + minimumRingSize: + description: The minimum number of virtual nodes to + use for the hash ring. Defaults to 1024. Larger + ring sizes result in more granular load distributions. + If the number of hosts in the load balancing pool + is larger than the ring size, each host will be + assigned a single virtual node. + format: int64 + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + simple: + description: 'It is required to specify exactly one of + the fields: Simple or ConsistentHash' + type: string + type: object + outlierDetection: + description: Settings controlling eviction of unhealthy hosts + from the load balancing pool + properties: + baseEjectionTime: + description: 'Minimum ejection duration. A host will remain + ejected for a period equal to the product of minimum + ejection duration and the number of times the host has + been ejected. This technique allows the system to automatically + increase the ejection period for unhealthy upstream + servers. format: 1h/1m/1s/1ms. MUST BE >=1ms. Default + is 30s.' + type: string + consecutiveErrors: + description: Number of errors before a host is ejected + from the connection pool. Defaults to 5. When the upstream + host is accessed over HTTP, a 5xx return code qualifies + as an error. When the upstream host is accessed over + an opaque TCP connection, connect timeouts and connection + error/failure events qualify as an error. + format: int32 + type: integer + interval: + description: 'Time interval between ejection sweep analysis. + format: 1h/1m/1s/1ms. MUST BE >=1ms. Default is 10s.' + type: string + maxEjectionPercent: + description: Maximum % of hosts in the load balancing + pool for the upstream service that can be ejected. Defaults + to 10%. 
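# Illustrative aside (not part of this patch): a DestinationRule that the subset/trafficPolicy
# schema above is meant to validate, targeting the istio.kubesphere.io group defined by this CRD.
# The "reviews" host, namespace, subset names, labels, and numeric values are hypothetical.
apiVersion: istio.kubesphere.io/v1alpha3
kind: DestinationRule
metadata:
  name: reviews-example
  namespace: default
spec:
  host: reviews.default.svc.cluster.local
  subsets:
  - name: v1
    labels:
      version: v1
    trafficPolicy:
      connectionPool:
        tcp:
          maxConnections: 100
          connectTimeout: 30ms
        http:
          maxRequestsPerConnection: 10
          maxRetries: 3
      loadBalancer:
        consistentHash:
          httpCookie:
            name: user
            ttl: 0s
      outlierDetection:
        consecutiveErrors: 5
        interval: 10s
        baseEjectionTime: 30s
        maxEjectionPercent: 10
  - name: v2
    labels:
      version: v2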
+ format: int32 + type: integer + type: object + portLevelSettings: + description: Traffic policies specific to individual ports. + Note that port level settings will override the destination-level + settings. Traffic settings specified at the destination-level + will not be inherited when overridden by port-level settings, + i.e. default values will be applied to fields omitted in + port-level traffic policies. + items: + properties: + connectionPool: + description: Settings controlling the volume of connections + to an upstream service + properties: + http: + description: HTTP connection pool settings. + properties: + maxRequestsPerConnection: + description: Maximum number of requests per + connection to a backend. Setting this parameter + to 1 disables keep alive. + format: int32 + type: integer + maxRetries: + description: Maximum number of retries that + can be outstanding to all hosts in a cluster + at a given time. Defaults to 3. + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP and TCP + upstream connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer + algorithms. + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: REQUIRED. Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: REQUIRED. Lifetime of the cookie. + type: string + required: + - name + - ttl + type: object + httpHeaderName: + description: 'It is required to specify exactly + one of the fields as hash key: HttpHeaderName, + HttpCookie, or UseSourceIP. Hash based on + a specific HTTP header.' + type: string + minimumRingSize: + description: The minimum number of virtual nodes + to use for the hash ring. Defaults to 1024. + Larger ring sizes result in more granular + load distributions. If the number of hosts + in the load balancing pool is larger than + the ring size, each host will be assigned + a single virtual node. + format: int64 + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + simple: + description: 'It is required to specify exactly + one of the fields: Simple or ConsistentHash' + type: string + type: object + outlierDetection: + description: Settings controlling eviction of unhealthy + hosts from the load balancing pool + properties: + baseEjectionTime: + description: 'Minimum ejection duration. A host + will remain ejected for a period equal to the + product of minimum ejection duration and the number + of times the host has been ejected. This technique + allows the system to automatically increase the + ejection period for unhealthy upstream servers. + format: 1h/1m/1s/1ms. MUST BE >=1ms. Default is + 30s.' + type: string + consecutiveErrors: + description: Number of errors before a host is ejected + from the connection pool. Defaults to 5. When + the upstream host is accessed over HTTP, a 5xx + return code qualifies as an error. When the upstream + host is accessed over an opaque TCP connection, + connect timeouts and connection error/failure + events qualify as an error. 
+ format: int32 + type: integer + interval: + description: 'Time interval between ejection sweep + analysis. format: 1h/1m/1s/1ms. MUST BE >=1ms. + Default is 10s.' + type: string + maxEjectionPercent: + description: Maximum % of hosts in the load balancing + pool for the upstream service that can be ejected. + Defaults to 10%. + format: int32 + type: integer + type: object + port: + description: Specifies the port name or number of a + port on the destination service on which this policy + is being applied. Names must comply with DNS label + syntax (rfc1035) and therefore cannot collide with + numbers. If there are multiple ports on a service + with the same protocol the names should be of the + form -. + properties: + name: + description: Valid port name + type: string + number: + description: Valid port number + format: int32 + type: integer + type: object + tls: + description: TLS related settings for connections to + the upstream service. + properties: + caCertificates: + description: 'OPTIONAL: The path to the file containing + certificate authority certificates to use in verifying + a presented server certificate. If omitted, the + proxy will not verify the server''s certificate. + Should be empty if mode is `ISTIO_MUTUAL`.' + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. The path + to the file holding the client-side TLS certificate + to use. Should be empty if mode is `ISTIO_MUTUAL`. + type: string + mode: + description: 'REQUIRED: Indicates whether connections + to this port should be secured using TLS. The + value of this field determines how TLS is enforced.' + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. The path + to the file holding the client's private key. + Should be empty if mode is `ISTIO_MUTUAL`. + type: string + sni: + description: SNI string to present to the server + during TLS handshake. Should be empty if mode + is `ISTIO_MUTUAL`. + type: string + subjectAltNames: + description: A list of alternate names to verify + the subject identity in the certificate. If specified, + the proxy will verify that the server certificate's + subject alt name matches one of the specified + values. Should be empty if mode is `ISTIO_MUTUAL`. + items: + type: string + type: array + required: + - mode + type: object + required: + - port + type: object + type: array + tls: + description: TLS related settings for connections to the upstream + service. + properties: + caCertificates: + description: 'OPTIONAL: The path to the file containing + certificate authority certificates to use in verifying + a presented server certificate. If omitted, the proxy + will not verify the server''s certificate. Should be + empty if mode is `ISTIO_MUTUAL`.' + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. The path to + the file holding the client-side TLS certificate to + use. Should be empty if mode is `ISTIO_MUTUAL`. + type: string + mode: + description: 'REQUIRED: Indicates whether connections + to this port should be secured using TLS. The value + of this field determines how TLS is enforced.' + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. The path to + the file holding the client's private key. Should be + empty if mode is `ISTIO_MUTUAL`. + type: string + sni: + description: SNI string to present to the server during + TLS handshake. Should be empty if mode is `ISTIO_MUTUAL`. 
+ type: string + subjectAltNames: + description: A list of alternate names to verify the subject + identity in the certificate. If specified, the proxy + will verify that the server certificate's subject alt + name matches one of the specified values. Should be + empty if mode is `ISTIO_MUTUAL`. + items: + type: string + type: array + required: + - mode + type: object + type: object + required: + - name + - labels + type: object + type: array + trafficPolicy: + description: Traffic policies to apply (load balancing policy, connection + pool sizes, outlier detection). + properties: + connectionPool: + description: Settings controlling the volume of connections to an + upstream service + properties: + http: + description: HTTP connection pool settings. + properties: + maxRequestsPerConnection: + description: Maximum number of requests per connection to + a backend. Setting this parameter to 1 disables keep alive. + format: int32 + type: integer + maxRetries: + description: Maximum number of retries that can be outstanding + to all hosts in a cluster at a given time. Defaults to + 3. + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP and TCP upstream connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections to + a destination host. + format: int32 + type: integer + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer algorithms. + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: REQUIRED. Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: REQUIRED. Lifetime of the cookie. + type: string + required: + - name + - ttl + type: object + httpHeaderName: + description: 'It is required to specify exactly one of the + fields as hash key: HttpHeaderName, HttpCookie, or UseSourceIP. + Hash based on a specific HTTP header.' + type: string + minimumRingSize: + description: The minimum number of virtual nodes to use + for the hash ring. Defaults to 1024. Larger ring sizes + result in more granular load distributions. If the number + of hosts in the load balancing pool is larger than the + ring size, each host will be assigned a single virtual + node. + format: int64 + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + simple: + description: 'It is required to specify exactly one of the fields: + Simple or ConsistentHash' + type: string + type: object + outlierDetection: + description: Settings controlling eviction of unhealthy hosts from + the load balancing pool + properties: + baseEjectionTime: + description: 'Minimum ejection duration. A host will remain + ejected for a period equal to the product of minimum ejection + duration and the number of times the host has been ejected. + This technique allows the system to automatically increase + the ejection period for unhealthy upstream servers. format: + 1h/1m/1s/1ms. MUST BE >=1ms. Default is 30s.' + type: string + consecutiveErrors: + description: Number of errors before a host is ejected from + the connection pool. Defaults to 5. When the upstream host + is accessed over HTTP, a 5xx return code qualifies as an error. 
+ When the upstream host is accessed over an opaque TCP connection, + connect timeouts and connection error/failure events qualify + as an error. + format: int32 + type: integer + interval: + description: 'Time interval between ejection sweep analysis. + format: 1h/1m/1s/1ms. MUST BE >=1ms. Default is 10s.' + type: string + maxEjectionPercent: + description: Maximum % of hosts in the load balancing pool for + the upstream service that can be ejected. Defaults to 10%. + format: int32 + type: integer + type: object + portLevelSettings: + description: Traffic policies specific to individual ports. Note + that port level settings will override the destination-level settings. + Traffic settings specified at the destination-level will not be + inherited when overridden by port-level settings, i.e. default + values will be applied to fields omitted in port-level traffic + policies. + items: + properties: + connectionPool: + description: Settings controlling the volume of connections + to an upstream service + properties: + http: + description: HTTP connection pool settings. + properties: + maxRequestsPerConnection: + description: Maximum number of requests per connection + to a backend. Setting this parameter to 1 disables + keep alive. + format: int32 + type: integer + maxRetries: + description: Maximum number of retries that can be + outstanding to all hosts in a cluster at a given + time. Defaults to 3. + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP and TCP upstream + connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer algorithms. + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: REQUIRED. Name of the cookie. + type: string + path: + description: Path to set for the cookie. + type: string + ttl: + description: REQUIRED. Lifetime of the cookie. + type: string + required: + - name + - ttl + type: object + httpHeaderName: + description: 'It is required to specify exactly one + of the fields as hash key: HttpHeaderName, HttpCookie, + or UseSourceIP. Hash based on a specific HTTP header.' + type: string + minimumRingSize: + description: The minimum number of virtual nodes to + use for the hash ring. Defaults to 1024. Larger + ring sizes result in more granular load distributions. + If the number of hosts in the load balancing pool + is larger than the ring size, each host will be + assigned a single virtual node. + format: int64 + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + simple: + description: 'It is required to specify exactly one of + the fields: Simple or ConsistentHash' + type: string + type: object + outlierDetection: + description: Settings controlling eviction of unhealthy hosts + from the load balancing pool + properties: + baseEjectionTime: + description: 'Minimum ejection duration. A host will remain + ejected for a period equal to the product of minimum + ejection duration and the number of times the host has + been ejected. This technique allows the system to automatically + increase the ejection period for unhealthy upstream + servers. format: 1h/1m/1s/1ms. MUST BE >=1ms. Default + is 30s.' 
+ type: string + consecutiveErrors: + description: Number of errors before a host is ejected + from the connection pool. Defaults to 5. When the upstream + host is accessed over HTTP, a 5xx return code qualifies + as an error. When the upstream host is accessed over + an opaque TCP connection, connect timeouts and connection + error/failure events qualify as an error. + format: int32 + type: integer + interval: + description: 'Time interval between ejection sweep analysis. + format: 1h/1m/1s/1ms. MUST BE >=1ms. Default is 10s.' + type: string + maxEjectionPercent: + description: Maximum % of hosts in the load balancing + pool for the upstream service that can be ejected. Defaults + to 10%. + format: int32 + type: integer + type: object + port: + description: Specifies the port name or number of a port on + the destination service on which this policy is being applied. Names + must comply with DNS label syntax (rfc1035) and therefore + cannot collide with numbers. If there are multiple ports + on a service with the same protocol the names should be + of the form -. + properties: + name: + description: Valid port name + type: string + number: + description: Valid port number + format: int32 + type: integer + type: object + tls: + description: TLS related settings for connections to the upstream + service. + properties: + caCertificates: + description: 'OPTIONAL: The path to the file containing + certificate authority certificates to use in verifying + a presented server certificate. If omitted, the proxy + will not verify the server''s certificate. Should be + empty if mode is `ISTIO_MUTUAL`.' + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. The path to + the file holding the client-side TLS certificate to + use. Should be empty if mode is `ISTIO_MUTUAL`. + type: string + mode: + description: 'REQUIRED: Indicates whether connections + to this port should be secured using TLS. The value + of this field determines how TLS is enforced.' + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. The path to + the file holding the client's private key. Should be + empty if mode is `ISTIO_MUTUAL`. + type: string + sni: + description: SNI string to present to the server during + TLS handshake. Should be empty if mode is `ISTIO_MUTUAL`. + type: string + subjectAltNames: + description: A list of alternate names to verify the subject + identity in the certificate. If specified, the proxy + will verify that the server certificate's subject alt + name matches one of the specified values. Should be + empty if mode is `ISTIO_MUTUAL`. + items: + type: string + type: array + required: + - mode + type: object + required: + - port + type: object + type: array + tls: + description: TLS related settings for connections to the upstream + service. + properties: + caCertificates: + description: 'OPTIONAL: The path to the file containing certificate + authority certificates to use in verifying a presented server + certificate. If omitted, the proxy will not verify the server''s + certificate. Should be empty if mode is `ISTIO_MUTUAL`.' + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. The path to the file + holding the client-side TLS certificate to use. Should be + empty if mode is `ISTIO_MUTUAL`. + type: string + mode: + description: 'REQUIRED: Indicates whether connections to this + port should be secured using TLS. The value of this field + determines how TLS is enforced.' 
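# Illustrative aside (not part of this patch): destination-level client TLS settings matching the
# tls blocks described in this schema. The host and certificate file paths are hypothetical; with
# mode ISTIO_MUTUAL the certificate fields would be left empty.
apiVersion: istio.kubesphere.io/v1alpha3
kind: DestinationRule
metadata:
  name: ratings-mtls-example
spec:
  host: ratings.default.svc.cluster.local
  trafficPolicy:
    tls:
      mode: MUTUAL
      clientCertificate: /etc/certs/client-cert.pem
      privateKey: /etc/certs/client-key.pem
      caCertificates: /etc/certs/ca-cert.pem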
+ type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. The path to the file + holding the client's private key. Should be empty if mode + is `ISTIO_MUTUAL`. + type: string + sni: + description: SNI string to present to the server during TLS + handshake. Should be empty if mode is `ISTIO_MUTUAL`. + type: string + subjectAltNames: + description: A list of alternate names to verify the subject + identity in the certificate. If specified, the proxy will + verify that the server certificate's subject alt name matches + one of the specified values. Should be empty if mode is `ISTIO_MUTUAL`. + items: + type: string + type: array + required: + - mode + type: object + type: object + required: + - host + type: object + required: + - spec + version: v1alpha3 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crds/istio_v1alpha3_gateway.yaml b/config/crds/istio_v1alpha3_gateway.yaml new file mode 100644 index 000000000..72a81f4e8 --- /dev/null +++ b/config/crds/istio_v1alpha3_gateway.yaml @@ -0,0 +1,129 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: gateways.istio.kubesphere.io +spec: + group: istio.kubesphere.io + names: + kind: Gateway + plural: gateways + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + selector: + description: One or more labels that indicate a specific set of pods/VMs + on which this gateway configuration should be applied. If no selectors + are provided, the gateway will be implemented by the default istio-ingress + controller. + type: object + servers: + description: 'REQUIRED: A list of server specifications.' + items: + properties: + hosts: + description: A list of hosts exposed by this gateway. While typically + applicable to HTTP services, it can also be used for TCP services + using TLS with SNI. Standard DNS wildcard prefix syntax is permitted. A + VirtualService that is bound to a gateway must having a matching + host in its default destination. Specifically one of the VirtualService + destination hosts is a strict suffix of a gateway host or a + gateway host is a suffix of one of the VirtualService hosts. + items: + type: string + type: array + port: + description: 'REQUIRED: The Port on which the proxy should listen + for incoming connections' + properties: + name: + description: Label assigned to the port. + type: string + number: + description: 'REQUIRED: A valid non-negative integer port + number.' + format: int64 + type: integer + protocol: + description: 'REQUIRED: The protocol exposed on the port. + MUST BE one of HTTP|HTTPS|GRPC|HTTP2|MONGO|TCP.' 
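# Illustrative aside (not part of this patch): a Gateway resource for the gateways.istio.kubesphere.io
# CRD above. The host, selector label, and certificate paths are hypothetical; the generated schema
# marks every tls field as required, hence the empty placeholders for unused fields.
apiVersion: istio.kubesphere.io/v1alpha3
kind: Gateway
metadata:
  name: bookinfo-gateway-example
spec:
  selector:
    istio: ingressgateway
  servers:
  - hosts:
    - "bookinfo.example.com"
    port:
      name: https
      number: 443
      protocol: HTTPS
    tls:
      httpsRedirect: false
      mode: SIMPLE
      serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
      privateKey: /etc/istio/ingressgateway-certs/tls.key
      caCertificates: ""
      subjectAltNames: []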
+ type: string + required: + - number + - protocol + type: object + tls: + description: Set of TLS related options that govern the server's + behavior. Use these options to control if all http requests + should be redirected to https, and the TLS modes to use. + properties: + caCertificates: + description: REQUIRED if mode is "MUTUAL". The path to a file + containing certificate authority certificates to use in + verifying a presented client side certificate. + type: string + httpsRedirect: + description: If set to true, the load balancer will send a + 302 redirect for all http connections, asking the clients + to use HTTPS. + type: boolean + mode: + description: 'Optional: Indicates whether connections to this + port should be secured using TLS. The value of this field + determines how TLS is enforced.' + type: string + privateKey: + description: REQUIRED if mode is "SIMPLE" or "MUTUAL". The + path to the file holding the server's private key. + type: string + serverCertificate: + description: REQUIRED if mode is "SIMPLE" or "MUTUAL". The + path to the file holding the server-side TLS certificate + to use. + type: string + subjectAltNames: + description: A list of alternate names to verify the subject + identity in the certificate presented by the client. + items: + type: string + type: array + required: + - httpsRedirect + - serverCertificate + - privateKey + - caCertificates + - subjectAltNames + type: object + required: + - port + type: object + type: array + required: + - servers + type: object + required: + - spec + version: v1alpha3 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crds/istio_v1alpha3_virtualservice.yaml b/config/crds/istio_v1alpha3_virtualservice.yaml new file mode 100644 index 000000000..23ef3f681 --- /dev/null +++ b/config/crds/istio_v1alpha3_virtualservice.yaml @@ -0,0 +1,695 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: virtualservices.istio.kubesphere.io +spec: + group: istio.kubesphere.io + names: + kind: VirtualService + plural: virtualservices + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + gateways: + description: The names of gateways and sidecars that should apply these + routes. A single VirtualService is used for sidecars inside the mesh + as well as for one or more gateways. The selection condition imposed + by this field can be overridden using the source field in the match + conditions of HTTP/TCP routes. The reserved word "mesh" is used to + imply all the sidecars in the mesh. When this field is omitted, the + default gateway ("mesh") will be used, which would apply the rule + to all sidecars in the mesh. 
If a list of gateway names is provided, + the rules will apply only to the gateways. To apply the rules to both + gateways and sidecars, specify "mesh" as one of the gateway names. + items: + type: string + type: array + hosts: + description: REQUIRED. The destination address for traffic captured + by this virtual service. Could be a DNS name with wildcard prefix + or a CIDR prefix. Depending on the platform, short-names can also + be used instead of a FQDN (i.e. has no dots in the name). In such + a scenario, the FQDN of the host would be derived based on the underlying + platform. For example on Kubernetes, when hosts contains a short + name, Istio will interpret the short name based on the namespace of + the rule. Thus, when a client namespace applies a rule in the "default" + namespace containing a name "reviews, Istio will setup routes to the + "reviews.default.svc.cluster.local" service. However, if a different + name such as "reviews.sales.svc.cluster.local" is used, it would be + treated as a FQDN during virtual host matching. In Consul, a plain + service name would be resolved to the FQDN "reviews.service.consul". Note + that the hosts field applies to both HTTP and TCP services. Service + inside the mesh, i.e., those found in the service registry, must always + be referred to using their alphanumeric names. IP addresses or CIDR + prefixes are allowed only for services defined via the Gateway. + items: + type: string + type: array + http: + description: An ordered list of route rules for HTTP traffic. The first + rule matching an incoming request is used. + items: + properties: + appendHeaders: + description: Additional HTTP headers to add before forwarding + a request to the destination service. + type: object + corsPolicy: + description: Cross-Origin Resource Sharing policy + properties: + allowCredentials: + description: Indicates whether the caller is allowed to send + the actual request (not the preflight) using credentials. + Translates to Access-Control-Allow-Credentials header. + type: boolean + allowHeaders: + description: List of HTTP headers that can be used when requesting + the resource. Serialized to Access-Control-Allow-Methods + header. + items: + type: string + type: array + allowMethods: + description: List of HTTP methods allowed to access the resource. + The content will be serialized into the Access-Control-Allow-Methods + header. + items: + type: string + type: array + allowOrigin: + description: The list of origins that are allowed to perform + CORS requests. The content will be serialized into the Access-Control-Allow-Origin + header. Wildcard * will allow all origins. + items: + type: string + type: array + exposeHeaders: + description: A white list of HTTP headers that the browsers + are allowed to access. Serialized into Access-Control-Expose-Headers + header. + items: + type: string + type: array + maxAge: + description: Specifies how long the the results of a preflight + request can be cached. Translates to the Access-Control-Max-Age + header. + type: string + type: object + fault: + description: Fault injection policy to apply on HTTP traffic. + properties: + abort: + description: Abort Http request attempts and return error + codes back to downstream service, giving the impression + that the upstream service is faulty. + properties: + httpStatus: + description: REQUIRED. HTTP status code to use to abort + the Http request. 
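# Illustrative aside (not part of this patch): fault injection as described above, inside a minimal
# VirtualService for the virtualservices.istio.kubesphere.io CRD. The "ratings" host, subset, and
# percentages are hypothetical; `route` (the default forwarding action) is assumed from the
# upstream Istio v1alpha3 VirtualService API.
apiVersion: istio.kubesphere.io/v1alpha3
kind: VirtualService
metadata:
  name: ratings-fault-example
spec:
  hosts:
  - ratings.default.svc.cluster.local
  http:
  - fault:
      delay:
        fixedDelay: 5s
        percent: 10
      abort:
        httpStatus: 500
        percent: 5
    route:
    - destination:
        host: ratings.default.svc.cluster.local
        subset: v1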
+ format: int64 + type: integer + percent: + description: Percentage of requests to be aborted with + the error code provided (0-100). + format: int64 + type: integer + required: + - httpStatus + type: object + delay: + description: Delay requests before forwarding, emulating various + failures such as network issues, overloaded upstream service, + etc. + properties: + exponentialDelay: + description: (-- Add a delay (based on an exponential + function) before forwarding the request. mean delay + needed to derive the exponential delay values --) + type: string + fixedDelay: + description: 'REQUIRED. Add a fixed delay before forwarding + the request. Format: 1h/1m/1s/1ms. MUST be >=1ms.' + type: string + percent: + description: Percentage of requests on which the delay + will be injected (0-100). + format: int64 + type: integer + required: + - fixedDelay + type: object + type: object + match: + description: Match conditions to be satisfied for the rule to + be activated. All conditions inside a single match block have + AND semantics, while the list of match blocks have OR semantics. + The rule is matched if any one of the match blocks succeed. + items: + properties: + authority: + description: 'HTTP Authority values are case-sensitive and + formatted as follows: - `exact: "value"` for exact string + match - `prefix: "value"` for prefix-based match - `regex: + "value"` for ECMAscript style regex-based match' + properties: + exact: + description: exact string match + type: string + prefix: + description: prefix-based match + type: string + regex: + description: ECMAscript style regex-based match + type: string + suffix: + description: suffix-based match. + type: string + type: object + gateways: + description: Names of gateways where the rule should be + applied to. Gateway names at the top of the VirtualService + (if any) are overridden. The gateway match is independent + of sourceLabels. + items: + type: string + type: array + headers: + description: 'The header keys must be lowercase and use + hyphen as the separator, e.g. _x-request-id_. Header + values are case-sensitive and formatted as follows: - + `exact: "value"` for exact string match - `prefix: "value"` + for prefix-based match - `regex: "value"` for ECMAscript + style regex-based match **Note:** The keys `uri`, `scheme`, + `method`, and `authority` will be ignored.' + type: object + method: + description: 'HTTP Method values are case-sensitive and + formatted as follows: - `exact: "value"` for exact string + match - `prefix: "value"` for prefix-based match - `regex: + "value"` for ECMAscript style regex-based match' + properties: + exact: + description: exact string match + type: string + prefix: + description: prefix-based match + type: string + regex: + description: ECMAscript style regex-based match + type: string + suffix: + description: suffix-based match. + type: string + type: object + port: + description: Specifies the ports on the host that is being + addressed. Many services only expose a single port or + label ports with the protocols they support, in these + cases it is not required to explicitly select the port. 
+ format: int32 + type: integer + scheme: + description: 'URI Scheme values are case-sensitive and formatted + as follows: - `exact: "value"` for exact string match - + `prefix: "value"` for prefix-based match - `regex: "value"` + for ECMAscript style regex-based match' + properties: + exact: + description: exact string match + type: string + prefix: + description: prefix-based match + type: string + regex: + description: ECMAscript style regex-based match + type: string + suffix: + description: suffix-based match. + type: string + type: object + sourceLabels: + description: One or more labels that constrain the applicability + of a rule to workloads with the given labels. If the VirtualService + has a list of gateways specified at the top, it should + include the reserved gateway `mesh` in order for this + field to be applicable. + type: object + uri: + description: 'URI to match values are case-sensitive and + formatted as follows: - `exact: "value"` for exact string + match - `prefix: "value"` for prefix-based match - `regex: + "value"` for ECMAscript style regex-based match' + properties: + exact: + description: exact string match + type: string + prefix: + description: prefix-based match + type: string + regex: + description: ECMAscript style regex-based match + type: string + suffix: + description: suffix-based match. + type: string + type: object + type: object + type: array + mirror: + description: Mirror HTTP traffic to a another destination in addition + to forwarding the requests to the intended destination. Mirrored + traffic is on a best effort basis where the sidecar/gateway + will not wait for the mirrored cluster to respond before returning + the response from the original destination. Statistics will + be generated for the mirrored destination. + properties: + host: + description: 'REQUIRED. The name of a service from the service + registry. Service names are looked up from the platform''s + service registry (e.g., Kubernetes services, Consul services, + etc.) and from the hosts declared by [ServiceEntry](#ServiceEntry). + Traffic forwarded to destinations that are not found in + either of the two, will be dropped. *Note for Kubernetes + users*: When short names are used (e.g. "reviews" instead + of "reviews.default.svc.cluster.local"), Istio will interpret + the short name based on the namespace of the rule, not the + service. A rule in the "default" namespace containing a + host "reviews will be interpreted as "reviews.default.svc.cluster.local", + irrespective of the actual namespace associated with the + reviews service. _To avoid potential misconfigurations, + it is recommended to always use fully qualified domain names + over short names._' + type: string + port: + description: Specifies the port on the host that is being + addressed. If a service exposes only a single port it is + not required to explicitly select the port. + properties: + name: + description: Valid port name + type: string + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the service. Applicable + only to services within the mesh. The subset must be defined + in a corresponding DestinationRule. + type: string + required: + - host + type: object + redirect: + description: A http rule can either redirect or forward (default) + traffic. If traffic passthrough option is specified in the rule, + route/redirect will be ignored. 
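To make the match semantics above concrete: conditions inside a single match block are AND-ed, while the blocks in the list are OR-ed. A hypothetical rule that targets either GET requests under a /api prefix or any request carrying an x-canary header could therefore be written as (paths, methods and header names are illustrative only):

    match:
    - uri:
        prefix: /api            # AND ...
      method:
        exact: GET              # ... within the same block
    - headers:                  # OR: a second, independent block
        x-canary:
          exact: "true"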
The redirect primitive can be + used to send a HTTP 302 redirect to a different URI or Authority. + properties: + authority: + description: On a redirect, overwrite the Authority/Host portion + of the URL with this value. + type: string + uri: + description: On a redirect, overwrite the Path portion of + the URL with this value. Note that the entire path will + be replaced, irrespective of the request URI being matched + as an exact path or prefix. + type: string + type: object + removeResponseHeaders: + description: Http headers to remove before returning the response + to the caller + type: object + retries: + description: Retry policy for HTTP requests. + properties: + attempts: + description: REQUIRED. Number of retries for a given request. + The interval between retries will be determined automatically + (25ms+). Actual number of retries attempted depends on the + httpReqTimeout. + format: int64 + type: integer + perTryTimeout: + description: 'Timeout per retry attempt for a given request. + format: 1h/1m/1s/1ms. MUST BE >=1ms.' + type: string + required: + - attempts + - perTryTimeout + type: object + rewrite: + description: Rewrite HTTP URIs and Authority headers. Rewrite + cannot be used with Redirect primitive. Rewrite will be performed + before forwarding. + properties: + authority: + description: rewrite the Authority/Host header with this value. + type: string + uri: + description: rewrite the path (or the prefix) portion of the + URI with this value. If the original URI was matched based + on prefix, the value provided in this field will replace + the corresponding matched prefix. + type: string + type: object + route: + description: A http rule can either redirect or forward (default) + traffic. The forwarding target can be one of several versions + of a service (see glossary in beginning of document). Weights + associated with the service version determine the proportion + of traffic it receives. + items: + properties: + destination: + description: REQUIRED. Destination uniquely identifies the + instances of a service to which the request/connection + should be forwarded to. + properties: + host: + description: 'REQUIRED. The name of a service from the + service registry. Service names are looked up from + the platform''s service registry (e.g., Kubernetes + services, Consul services, etc.) and from the hosts + declared by [ServiceEntry](#ServiceEntry). Traffic + forwarded to destinations that are not found in either + of the two, will be dropped. *Note for Kubernetes + users*: When short names are used (e.g. "reviews" + instead of "reviews.default.svc.cluster.local"), Istio + will interpret the short name based on the namespace + of the rule, not the service. A rule in the "default" + namespace containing a host "reviews will be interpreted + as "reviews.default.svc.cluster.local", irrespective + of the actual namespace associated with the reviews + service. _To avoid potential misconfigurations, it + is recommended to always use fully qualified domain + names over short names._' + type: string + port: + description: Specifies the port on the host that is + being addressed. If a service exposes only a single + port it is not required to explicitly select the port. + properties: + name: + description: Valid port name + type: string + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the service. + Applicable only to services within the mesh. 
The subset + must be defined in a corresponding DestinationRule. + type: string + required: + - host + type: object + weight: + description: REQUIRED. The proportion of traffic to be forwarded + to the service version. (0-100). Sum of weights across + destinations SHOULD BE == 100. If there is only destination + in a rule, the weight value is assumed to be 100. + format: int64 + type: integer + required: + - destination + - weight + type: object + type: array + timeout: + description: Timeout for HTTP requests. + type: string + websocketUpgrade: + description: Indicates that a HTTP/1.1 client connection to this + particular route should be allowed (and expected) to upgrade + to a WebSocket connection. The default is false. Istio's reference + sidecar implementation (Envoy) expects the first request to + this route to contain the WebSocket upgrade headers. Otherwise, + the request will be rejected. Note that Websocket allows secondary + protocol negotiation which may then be subject to further routing + rules based on the protocol selected. + type: boolean + type: object + type: array + tcp: + description: An ordered list of route rules for TCP traffic. The first + rule matching an incoming request is used. + items: + properties: + match: + description: Match conditions to be satisfied for the rule to + be activated. All conditions inside a single match block have + AND semantics, while the list of match blocks have OR semantics. + The rule is matched if any one of the match blocks succeed. + items: + properties: + destinationSubnets: + description: IPv4 or IPv6 ip address of destination with + optional subnet. E.g., a.b.c.d/xx form or just a.b.c.d. + items: + type: string + type: array + gateways: + description: Names of gateways where the rule should be + applied to. Gateway names at the top of the VirtualService + (if any) are overridden. The gateway match is independent + of sourceLabels. + items: + type: string + type: array + port: + description: Specifies the port on the host that is being + addressed. Many services only expose a single port or + label ports with the protocols they support, in these + cases it is not required to explicitly select the port. + format: int64 + type: integer + sourceLabels: + description: One or more labels that constrain the applicability + of a rule to workloads with the given labels. If the VirtualService + has a list of gateways specified at the top, it should + include the reserved gateway `mesh` in order for this + field to be applicable. + type: object + type: object + type: array + route: + description: The destinations to which the connection should be + forwarded to. Weights must add to 100%. + items: + properties: + destination: + description: REQUIRED. Destination uniquely identifies the + instances of a service to which the request/connection + should be forwarded to. + properties: + host: + description: 'REQUIRED. The name of a service from the + service registry. Service names are looked up from + the platform''s service registry (e.g., Kubernetes + services, Consul services, etc.) and from the hosts + declared by [ServiceEntry](#ServiceEntry). Traffic + forwarded to destinations that are not found in either + of the two, will be dropped. *Note for Kubernetes + users*: When short names are used (e.g. "reviews" + instead of "reviews.default.svc.cluster.local"), Istio + will interpret the short name based on the namespace + of the rule, not the service. 
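Putting the route, weight, retries and timeout fields described above together, a hypothetical canary-style http rule would look roughly like the following (service name, subsets and percentages are invented for illustration):

    http:
    - retries:
        attempts: 3
        perTryTimeout: 2s
      timeout: 10s
      route:
      - destination:
          host: reviews          # resolved against the platform's service registry
          subset: v1
        weight: 90
      - destination:
          host: reviews
          subset: v2
        weight: 10               # weights across destinations sum to 100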
A rule in the "default" + namespace containing a host "reviews will be interpreted + as "reviews.default.svc.cluster.local", irrespective + of the actual namespace associated with the reviews + service. _To avoid potential misconfigurations, it + is recommended to always use fully qualified domain + names over short names._' + type: string + port: + description: Specifies the port on the host that is + being addressed. If a service exposes only a single + port it is not required to explicitly select the port. + properties: + name: + description: Valid port name + type: string + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the service. + Applicable only to services within the mesh. The subset + must be defined in a corresponding DestinationRule. + type: string + required: + - host + type: object + weight: + description: REQUIRED. The proportion of traffic to be forwarded + to the service version. (0-100). Sum of weights across + destinations SHOULD BE == 100. If there is only destination + in a rule, the weight value is assumed to be 100. + format: int64 + type: integer + required: + - destination + - weight + type: object + type: array + required: + - match + - route + type: object + type: array + tls: + items: + properties: + match: + description: REQUIRED. Match conditions to be satisfied for the + rule to be activated. All conditions inside a single match block + have AND semantics, while the list of match blocks have OR semantics. + The rule is matched if any one of the match blocks succeed. + items: + properties: + destinationSubnets: + description: IPv4 or IPv6 ip addresses of destination with + optional subnet. E.g., a.b.c.d/xx form or just a.b.c.d. + items: + type: string + type: array + gateways: + description: Names of gateways where the rule should be + applied to. Gateway names at the top of the VirtualService + (if any) are overridden. The gateway match is independent + of sourceLabels. + items: + type: string + type: array + port: + description: Specifies the port on the host that is being + addressed. Many services only expose a single port or + label ports with the protocols they support, in these + cases it is not required to explicitly select the port. + format: int64 + type: integer + sniHosts: + description: REQUIRED. SNI (server name indicator) to match + on. Wildcard prefixes can be used in the SNI value, e.g., + *.com will match foo.example.com as well as example.com. + An SNI value must be a subset (i.e., fall within the domain) + of the corresponding virtual service's hosts + items: + type: string + type: array + sourceLabels: + description: One or more labels that constrain the applicability + of a rule to workloads with the given labels. If the VirtualService + has a list of gateways specified at the top, it should + include the reserved gateway `mesh` in order for this + field to be applicable. + type: object + required: + - sniHosts + type: object + type: array + route: + description: The destination to which the connection should be + forwarded to. + items: + properties: + destination: + description: REQUIRED. Destination uniquely identifies the + instances of a service to which the request/connection + should be forwarded to. + properties: + host: + description: 'REQUIRED. The name of a service from the + service registry. Service names are looked up from + the platform''s service registry (e.g., Kubernetes + services, Consul services, etc.) 
and from the hosts + declared by [ServiceEntry](#ServiceEntry). Traffic + forwarded to destinations that are not found in either + of the two, will be dropped. *Note for Kubernetes + users*: When short names are used (e.g. "reviews" + instead of "reviews.default.svc.cluster.local"), Istio + will interpret the short name based on the namespace + of the rule, not the service. A rule in the "default" + namespace containing a host "reviews will be interpreted + as "reviews.default.svc.cluster.local", irrespective + of the actual namespace associated with the reviews + service. _To avoid potential misconfigurations, it + is recommended to always use fully qualified domain + names over short names._' + type: string + port: + description: Specifies the port on the host that is + being addressed. If a service exposes only a single + port it is not required to explicitly select the port. + properties: + name: + description: Valid port name + type: string + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the service. + Applicable only to services within the mesh. The subset + must be defined in a corresponding DestinationRule. + type: string + required: + - host + type: object + weight: + description: REQUIRED. The proportion of traffic to be forwarded + to the service version. (0-100). Sum of weights across + destinations SHOULD BE == 100. If there is only destination + in a rule, the weight value is assumed to be 100. + format: int64 + type: integer + required: + - destination + - weight + type: object + type: array + required: + - match + - route + type: object + type: array + required: + - hosts + type: object + required: + - spec + version: v1alpha3 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crds/servicemesh_v1alpha2_strategy.yaml b/config/crds/servicemesh_v1alpha2_strategy.yaml new file mode 100644 index 000000000..8bd8515bd --- /dev/null +++ b/config/crds/servicemesh_v1alpha2_strategy.yaml @@ -0,0 +1,762 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: strategies.servicemesh.kubesphere.io +spec: + group: servicemesh.kubesphere.io + names: + kind: Strategy + plural: strategies + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + paused: + description: Indicates that the strategy is paused and will not be processed + by the strategy controller + type: boolean + selector: + description: Label selector for virtual services. + type: object + template: + description: Template describes the virtual service that will be created. 
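In other words, template embeds a VirtualService: the spec block that follows repeats the VirtualService schema (hosts, http, tcp, tls) from the CRD above, and the strategy controller is expected to create virtual services from it. A minimal, hypothetical Strategy showing the shape (the fuller canary sample added later in this diff, config/samples/servicemesh_v1alpha2_strategy.yaml, follows the same pattern; names and labels here are illustrative):

    apiVersion: servicemesh.kubesphere.io/v1alpha2
    kind: Strategy
    metadata:
      name: reviews-canary        # hypothetical name
    spec:
      type: Canary
      selector:
        matchLabels:
          app: reviews            # illustrative label selector
      template:
        spec:
          hosts:
          - reviews
          http:
          - route:
            - destination:
                host: reviews
                subset: v2        # single destination: weight defaults to 100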
+ properties: + metadata: + description: Metadata of the virtual services created from this + template + type: object + spec: + description: Spec indicates the behavior of a virtual service. + properties: + gateways: + description: The names of gateways and sidecars that should + apply these routes. A single VirtualService is used for sidecars + inside the mesh as well as for one or more gateways. The selection + condition imposed by this field can be overridden using the + source field in the match conditions of HTTP/TCP routes. The + reserved word "mesh" is used to imply all the sidecars in + the mesh. When this field is omitted, the default gateway + ("mesh") will be used, which would apply the rule to all sidecars + in the mesh. If a list of gateway names is provided, the rules + will apply only to the gateways. To apply the rules to both + gateways and sidecars, specify "mesh" as one of the gateway + names. + items: + type: string + type: array + hosts: + description: REQUIRED. The destination address for traffic captured + by this virtual service. Could be a DNS name with wildcard + prefix or a CIDR prefix. Depending on the platform, short-names + can also be used instead of a FQDN (i.e. has no dots in the + name). In such a scenario, the FQDN of the host would be derived + based on the underlying platform. For example on Kubernetes, + when hosts contains a short name, Istio will interpret the + short name based on the namespace of the rule. Thus, when + a client namespace applies a rule in the "default" namespace + containing a name "reviews, Istio will setup routes to the + "reviews.default.svc.cluster.local" service. However, if a + different name such as "reviews.sales.svc.cluster.local" is + used, it would be treated as a FQDN during virtual host matching. + In Consul, a plain service name would be resolved to the FQDN + "reviews.service.consul". Note that the hosts field applies + to both HTTP and TCP services. Service inside the mesh, i.e., + those found in the service registry, must always be referred + to using their alphanumeric names. IP addresses or CIDR prefixes + are allowed only for services defined via the Gateway. + items: + type: string + type: array + http: + description: An ordered list of route rules for HTTP traffic. + The first rule matching an incoming request is used. + items: + properties: + appendHeaders: + description: Additional HTTP headers to add before forwarding + a request to the destination service. + type: object + corsPolicy: + description: Cross-Origin Resource Sharing policy + properties: + allowCredentials: + description: Indicates whether the caller is allowed + to send the actual request (not the preflight) using + credentials. Translates to Access-Control-Allow-Credentials + header. + type: boolean + allowHeaders: + description: List of HTTP headers that can be used + when requesting the resource. Serialized to Access-Control-Allow-Methods + header. + items: + type: string + type: array + allowMethods: + description: List of HTTP methods allowed to access + the resource. The content will be serialized into + the Access-Control-Allow-Methods header. + items: + type: string + type: array + allowOrigin: + description: The list of origins that are allowed + to perform CORS requests. The content will be serialized + into the Access-Control-Allow-Origin header. Wildcard + * will allow all origins. + items: + type: string + type: array + exposeHeaders: + description: A white list of HTTP headers that the + browsers are allowed to access. 
Serialized into + Access-Control-Expose-Headers header. + items: + type: string + type: array + maxAge: + description: Specifies how long the the results of + a preflight request can be cached. Translates to + the Access-Control-Max-Age header. + type: string + type: object + fault: + description: Fault injection policy to apply on HTTP traffic. + properties: + abort: + description: Abort Http request attempts and return + error codes back to downstream service, giving the + impression that the upstream service is faulty. + properties: + httpStatus: + description: REQUIRED. HTTP status code to use + to abort the Http request. + format: int64 + type: integer + percent: + description: Percentage of requests to be aborted + with the error code provided (0-100). + format: int64 + type: integer + required: + - httpStatus + type: object + delay: + description: Delay requests before forwarding, emulating + various failures such as network issues, overloaded + upstream service, etc. + properties: + exponentialDelay: + description: (-- Add a delay (based on an exponential + function) before forwarding the request. mean + delay needed to derive the exponential delay + values --) + type: string + fixedDelay: + description: 'REQUIRED. Add a fixed delay before + forwarding the request. Format: 1h/1m/1s/1ms. + MUST be >=1ms.' + type: string + percent: + description: Percentage of requests on which the + delay will be injected (0-100). + format: int64 + type: integer + required: + - fixedDelay + type: object + type: object + match: + description: Match conditions to be satisfied for the + rule to be activated. All conditions inside a single + match block have AND semantics, while the list of match + blocks have OR semantics. The rule is matched if any + one of the match blocks succeed. + items: + properties: + authority: + description: 'HTTP Authority values are case-sensitive + and formatted as follows: - `exact: "value"` + for exact string match - `prefix: "value"` for + prefix-based match - `regex: "value"` for ECMAscript + style regex-based match' + properties: + exact: + description: exact string match + type: string + prefix: + description: prefix-based match + type: string + regex: + description: ECMAscript style regex-based match + type: string + suffix: + description: suffix-based match. + type: string + type: object + gateways: + description: Names of gateways where the rule should + be applied to. Gateway names at the top of the + VirtualService (if any) are overridden. The gateway + match is independent of sourceLabels. + items: + type: string + type: array + headers: + description: 'The header keys must be lowercase + and use hyphen as the separator, e.g. _x-request-id_. Header + values are case-sensitive and formatted as follows: - + `exact: "value"` for exact string match - `prefix: + "value"` for prefix-based match - `regex: "value"` + for ECMAscript style regex-based match **Note:** + The keys `uri`, `scheme`, `method`, and `authority` + will be ignored.' + type: object + method: + description: 'HTTP Method values are case-sensitive + and formatted as follows: - `exact: "value"` + for exact string match - `prefix: "value"` for + prefix-based match - `regex: "value"` for ECMAscript + style regex-based match' + properties: + exact: + description: exact string match + type: string + prefix: + description: prefix-based match + type: string + regex: + description: ECMAscript style regex-based match + type: string + suffix: + description: suffix-based match. 
+ type: string + type: object + port: + description: Specifies the ports on the host that + is being addressed. Many services only expose + a single port or label ports with the protocols + they support, in these cases it is not required + to explicitly select the port. + format: int32 + type: integer + scheme: + description: 'URI Scheme values are case-sensitive + and formatted as follows: - `exact: "value"` + for exact string match - `prefix: "value"` for + prefix-based match - `regex: "value"` for ECMAscript + style regex-based match' + properties: + exact: + description: exact string match + type: string + prefix: + description: prefix-based match + type: string + regex: + description: ECMAscript style regex-based match + type: string + suffix: + description: suffix-based match. + type: string + type: object + sourceLabels: + description: One or more labels that constrain the + applicability of a rule to workloads with the + given labels. If the VirtualService has a list + of gateways specified at the top, it should include + the reserved gateway `mesh` in order for this + field to be applicable. + type: object + uri: + description: 'URI to match values are case-sensitive + and formatted as follows: - `exact: "value"` + for exact string match - `prefix: "value"` for + prefix-based match - `regex: "value"` for ECMAscript + style regex-based match' + properties: + exact: + description: exact string match + type: string + prefix: + description: prefix-based match + type: string + regex: + description: ECMAscript style regex-based match + type: string + suffix: + description: suffix-based match. + type: string + type: object + type: object + type: array + mirror: + description: Mirror HTTP traffic to a another destination + in addition to forwarding the requests to the intended + destination. Mirrored traffic is on a best effort basis + where the sidecar/gateway will not wait for the mirrored + cluster to respond before returning the response from + the original destination. Statistics will be generated + for the mirrored destination. + properties: + host: + description: 'REQUIRED. The name of a service from + the service registry. Service names are looked up + from the platform''s service registry (e.g., Kubernetes + services, Consul services, etc.) and from the hosts + declared by [ServiceEntry](#ServiceEntry). Traffic + forwarded to destinations that are not found in + either of the two, will be dropped. *Note for Kubernetes + users*: When short names are used (e.g. "reviews" + instead of "reviews.default.svc.cluster.local"), + Istio will interpret the short name based on the + namespace of the rule, not the service. A rule in + the "default" namespace containing a host "reviews + will be interpreted as "reviews.default.svc.cluster.local", + irrespective of the actual namespace associated + with the reviews service. _To avoid potential misconfigurations, + it is recommended to always use fully qualified + domain names over short names._' + type: string + port: + description: Specifies the port on the host that is + being addressed. If a service exposes only a single + port it is not required to explicitly select the + port. + properties: + name: + description: Valid port name + type: string + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the service. + Applicable only to services within the mesh. The + subset must be defined in a corresponding DestinationRule. 
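The subsets referenced here (for example v1 and v2 in the sample strategy added later in this diff) are not defined by this CRD; per the description above they must exist in a corresponding DestinationRule. Assuming the standard networking.istio.io/v1alpha3 API, a matching DestinationRule would look roughly like this (name and labels are hypothetical):

    apiVersion: networking.istio.io/v1alpha3
    kind: DestinationRule
    metadata:
      name: reviews               # hypothetical
    spec:
      host: reviews
      subsets:
      - name: v1
        labels:
          version: v1             # selects pods labeled version=v1
      - name: v2
        labels:
          version: v2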
+ type: string + required: + - host + type: object + redirect: + description: A http rule can either redirect or forward + (default) traffic. If traffic passthrough option is + specified in the rule, route/redirect will be ignored. + The redirect primitive can be used to send a HTTP 302 + redirect to a different URI or Authority. + properties: + authority: + description: On a redirect, overwrite the Authority/Host + portion of the URL with this value. + type: string + uri: + description: On a redirect, overwrite the Path portion + of the URL with this value. Note that the entire + path will be replaced, irrespective of the request + URI being matched as an exact path or prefix. + type: string + type: object + removeResponseHeaders: + description: Http headers to remove before returning the + response to the caller + type: object + retries: + description: Retry policy for HTTP requests. + properties: + attempts: + description: REQUIRED. Number of retries for a given + request. The interval between retries will be determined + automatically (25ms+). Actual number of retries + attempted depends on the httpReqTimeout. + format: int64 + type: integer + perTryTimeout: + description: 'Timeout per retry attempt for a given + request. format: 1h/1m/1s/1ms. MUST BE >=1ms.' + type: string + required: + - attempts + - perTryTimeout + type: object + rewrite: + description: Rewrite HTTP URIs and Authority headers. + Rewrite cannot be used with Redirect primitive. Rewrite + will be performed before forwarding. + properties: + authority: + description: rewrite the Authority/Host header with + this value. + type: string + uri: + description: rewrite the path (or the prefix) portion + of the URI with this value. If the original URI + was matched based on prefix, the value provided + in this field will replace the corresponding matched + prefix. + type: string + type: object + route: + description: A http rule can either redirect or forward + (default) traffic. The forwarding target can be one + of several versions of a service (see glossary in beginning + of document). Weights associated with the service version + determine the proportion of traffic it receives. + items: + properties: + destination: + description: REQUIRED. Destination uniquely identifies + the instances of a service to which the request/connection + should be forwarded to. + properties: + host: + description: 'REQUIRED. The name of a service + from the service registry. Service names are + looked up from the platform''s service registry + (e.g., Kubernetes services, Consul services, + etc.) and from the hosts declared by [ServiceEntry](#ServiceEntry). + Traffic forwarded to destinations that are + not found in either of the two, will be dropped. *Note + for Kubernetes users*: When short names are + used (e.g. "reviews" instead of "reviews.default.svc.cluster.local"), + Istio will interpret the short name based + on the namespace of the rule, not the service. + A rule in the "default" namespace containing + a host "reviews will be interpreted as "reviews.default.svc.cluster.local", + irrespective of the actual namespace associated + with the reviews service. _To avoid potential + misconfigurations, it is recommended to always + use fully qualified domain names over short + names._' + type: string + port: + description: Specifies the port on the host + that is being addressed. If a service exposes + only a single port it is not required to explicitly + select the port. 
+ properties: + name: + description: Valid port name + type: string + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the + service. Applicable only to services within + the mesh. The subset must be defined in a + corresponding DestinationRule. + type: string + required: + - host + type: object + weight: + description: REQUIRED. The proportion of traffic + to be forwarded to the service version. (0-100). + Sum of weights across destinations SHOULD BE == + 100. If there is only destination in a rule, the + weight value is assumed to be 100. + format: int64 + type: integer + required: + - destination + - weight + type: object + type: array + timeout: + description: Timeout for HTTP requests. + type: string + websocketUpgrade: + description: Indicates that a HTTP/1.1 client connection + to this particular route should be allowed (and expected) + to upgrade to a WebSocket connection. The default is + false. Istio's reference sidecar implementation (Envoy) + expects the first request to this route to contain the + WebSocket upgrade headers. Otherwise, the request will + be rejected. Note that Websocket allows secondary protocol + negotiation which may then be subject to further routing + rules based on the protocol selected. + type: boolean + type: object + type: array + tcp: + description: An ordered list of route rules for TCP traffic. + The first rule matching an incoming request is used. + items: + properties: + match: + description: Match conditions to be satisfied for the + rule to be activated. All conditions inside a single + match block have AND semantics, while the list of match + blocks have OR semantics. The rule is matched if any + one of the match blocks succeed. + items: + properties: + destinationSubnets: + description: IPv4 or IPv6 ip address of destination + with optional subnet. E.g., a.b.c.d/xx form or + just a.b.c.d. + items: + type: string + type: array + gateways: + description: Names of gateways where the rule should + be applied to. Gateway names at the top of the + VirtualService (if any) are overridden. The gateway + match is independent of sourceLabels. + items: + type: string + type: array + port: + description: Specifies the port on the host that + is being addressed. Many services only expose + a single port or label ports with the protocols + they support, in these cases it is not required + to explicitly select the port. + format: int64 + type: integer + sourceLabels: + description: One or more labels that constrain the + applicability of a rule to workloads with the + given labels. If the VirtualService has a list + of gateways specified at the top, it should include + the reserved gateway `mesh` in order for this + field to be applicable. + type: object + type: object + type: array + route: + description: The destinations to which the connection + should be forwarded to. Weights must add to 100%. + items: + properties: + destination: + description: REQUIRED. Destination uniquely identifies + the instances of a service to which the request/connection + should be forwarded to. + properties: + host: + description: 'REQUIRED. The name of a service + from the service registry. Service names are + looked up from the platform''s service registry + (e.g., Kubernetes services, Consul services, + etc.) and from the hosts declared by [ServiceEntry](#ServiceEntry). + Traffic forwarded to destinations that are + not found in either of the two, will be dropped. 
*Note + for Kubernetes users*: When short names are + used (e.g. "reviews" instead of "reviews.default.svc.cluster.local"), + Istio will interpret the short name based + on the namespace of the rule, not the service. + A rule in the "default" namespace containing + a host "reviews will be interpreted as "reviews.default.svc.cluster.local", + irrespective of the actual namespace associated + with the reviews service. _To avoid potential + misconfigurations, it is recommended to always + use fully qualified domain names over short + names._' + type: string + port: + description: Specifies the port on the host + that is being addressed. If a service exposes + only a single port it is not required to explicitly + select the port. + properties: + name: + description: Valid port name + type: string + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the + service. Applicable only to services within + the mesh. The subset must be defined in a + corresponding DestinationRule. + type: string + required: + - host + type: object + weight: + description: REQUIRED. The proportion of traffic + to be forwarded to the service version. (0-100). + Sum of weights across destinations SHOULD BE == + 100. If there is only destination in a rule, the + weight value is assumed to be 100. + format: int64 + type: integer + required: + - destination + - weight + type: object + type: array + required: + - match + - route + type: object + type: array + tls: + items: + properties: + match: + description: REQUIRED. Match conditions to be satisfied + for the rule to be activated. All conditions inside + a single match block have AND semantics, while the list + of match blocks have OR semantics. The rule is matched + if any one of the match blocks succeed. + items: + properties: + destinationSubnets: + description: IPv4 or IPv6 ip addresses of destination + with optional subnet. E.g., a.b.c.d/xx form or + just a.b.c.d. + items: + type: string + type: array + gateways: + description: Names of gateways where the rule should + be applied to. Gateway names at the top of the + VirtualService (if any) are overridden. The gateway + match is independent of sourceLabels. + items: + type: string + type: array + port: + description: Specifies the port on the host that + is being addressed. Many services only expose + a single port or label ports with the protocols + they support, in these cases it is not required + to explicitly select the port. + format: int64 + type: integer + sniHosts: + description: REQUIRED. SNI (server name indicator) + to match on. Wildcard prefixes can be used in + the SNI value, e.g., *.com will match foo.example.com + as well as example.com. An SNI value must be a + subset (i.e., fall within the domain) of the corresponding + virtual service's hosts + items: + type: string + type: array + sourceLabels: + description: One or more labels that constrain the + applicability of a rule to workloads with the + given labels. If the VirtualService has a list + of gateways specified at the top, it should include + the reserved gateway `mesh` in order for this + field to be applicable. + type: object + required: + - sniHosts + type: object + type: array + route: + description: The destination to which the connection should + be forwarded to. + items: + properties: + destination: + description: REQUIRED. Destination uniquely identifies + the instances of a service to which the request/connection + should be forwarded to. 
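For the tls section, the required sniHosts match plus a destination could be expressed as follows (a sketch only; the SNI host and port are invented and must fall within the virtual service's hosts, as noted above):

    tls:
    - match:
      - port: 443
        sniHosts:
        - reviews.example.com
      route:
      - destination:
          host: reviews
          port:
            number: 443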
+ properties: + host: + description: 'REQUIRED. The name of a service + from the service registry. Service names are + looked up from the platform''s service registry + (e.g., Kubernetes services, Consul services, + etc.) and from the hosts declared by [ServiceEntry](#ServiceEntry). + Traffic forwarded to destinations that are + not found in either of the two, will be dropped. *Note + for Kubernetes users*: When short names are + used (e.g. "reviews" instead of "reviews.default.svc.cluster.local"), + Istio will interpret the short name based + on the namespace of the rule, not the service. + A rule in the "default" namespace containing + a host "reviews will be interpreted as "reviews.default.svc.cluster.local", + irrespective of the actual namespace associated + with the reviews service. _To avoid potential + misconfigurations, it is recommended to always + use fully qualified domain names over short + names._' + type: string + port: + description: Specifies the port on the host + that is being addressed. If a service exposes + only a single port it is not required to explicitly + select the port. + properties: + name: + description: Valid port name + type: string + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the + service. Applicable only to services within + the mesh. The subset must be defined in a + corresponding DestinationRule. + type: string + required: + - host + type: object + weight: + description: REQUIRED. The proportion of traffic + to be forwarded to the service version. (0-100). + Sum of weights across destinations SHOULD BE == + 100. If there is only destination in a rule, the + weight value is assumed to be 100. + format: int64 + type: integer + required: + - destination + - weight + type: object + type: array + required: + - match + - route + type: object + type: array + required: + - hosts + type: object + type: object + type: + description: Strategy type + type: string + type: object + status: + type: object + version: v1alpha2 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 52351df1e..69d3bc9b9 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -24,9 +24,9 @@ resources: # Comment the following 3 lines if you want to disable # the auth proxy (https://github.com/brancz/kube-rbac-proxy) # which protects your /metrics endpoint. 
-- ../rbac/auth_proxy_service.yaml -- ../rbac/auth_proxy_role.yaml -- ../rbac/auth_proxy_role_binding.yaml +#- ../rbac/auth_proxy_service.yaml +#- ../rbac/auth_proxy_role.yaml +#- ../rbac/auth_proxy_role_binding.yaml patches: - manager_image_patch.yaml diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index cbcc6d0f3..9b745f8e9 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kube-rbac-proxy - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0 + image: quay.io/coreos/kube-rbac-proxy:v0.4.0 args: - "--secure-listen-address=0.0.0.0:8443" - "--upstream=http://127.0.0.1:8080/" diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml index fcbf39dc6..a81e2d323 100644 --- a/config/default/manager_image_patch.yaml +++ b/config/default/manager_image_patch.yaml @@ -8,5 +8,5 @@ spec: spec: containers: # Change the value of image field below to your controller image URL - - image: IMAGE_URL + - image: kubespheredev/controller-manager:latest name: manager diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index b718bb4ec..e27902b1f 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -43,8 +43,8 @@ spec: spec: containers: - command: - - /manager - image: controller:latest + - ./controller-manager + image: kubespheredev/controller-manager:latest imagePullPolicy: Always name: manager env: diff --git a/config/rbac/rbac_role.yaml b/config/rbac/rbac_role.yaml new file mode 100644 index 000000000..0662cc129 --- /dev/null +++ b/config/rbac/rbac_role.yaml @@ -0,0 +1,87 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - networking.istio.io + resources: + - virtualservices + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - networking.istio.io + resources: + - virtualservices/status + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - servicemesh.kubesphere.io + resources: + - strategies + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - servicemesh.kubesphere.io + resources: + - strategies/status + verbs: + - get + - update + - patch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - create + - update + - patch + - delete diff --git a/config/rbac/rbac_role_binding.yaml b/config/rbac/rbac_role_binding.yaml new file mode 100644 index 000000000..c1033e23f --- /dev/null +++ b/config/rbac/rbac_role_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/config/samples/servicemesh_v1alpha2_strategy.yaml b/config/samples/servicemesh_v1alpha2_strategy.yaml new file mode 100644 index 000000000..cfc60055c --- 
/dev/null +++ b/config/samples/servicemesh_v1alpha2_strategy.yaml @@ -0,0 +1,26 @@ +apiVersion: servicemesh.kubesphere.io/v1alpha2 +kind: Strategy +metadata: + labels: + controller-tools.k8s.io: "1.0" + name: strategy-sample +spec: + # Add fields here + type: Canary + selector: + matchLabels: + "servicemesh.kubesphere.io/type": "canary" + template: + spec: + hosts: + - details + http: + - route: + - destination: + host: "details" + subset: v1 + weight: 60 + - destination: + host: "details" + subset: v2 + weight: 40 diff --git a/hack/custom-boilerplate.go.txt b/hack/custom-boilerplate.go.txt new file mode 100644 index 000000000..c7a8ca820 --- /dev/null +++ b/hack/custom-boilerplate.go.txt @@ -0,0 +1,16 @@ +/* +Copyright 2019 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + diff --git a/hack/gobuild.sh b/hack/gobuild.sh index f8f76c827..0622ec940 100755 --- a/hack/gobuild.sh +++ b/hack/gobuild.sh @@ -29,8 +29,8 @@ fi ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" -OUTPUT_DIR=${ROOTDIR}/bin -BUILDPATH=${ROOTDIR}/${1:?"path to build"} +OUTPUT_DIR=bin +BUILDPATH=./${1:?"path to build"} OUT=${OUTPUT_DIR}/${1:?"output path"} set -e @@ -42,4 +42,4 @@ GOBINARY=${GOBINARY:-go} # forgoing -i (incremental build) because it will be deprecated by tool chain. time GOOS=${BUILD_GOOS} GOARCH=${BUILD_GOARCH} ${GOBINARY} build \ -o ${OUT} \ - ${BUILDPATH} \ No newline at end of file + ${BUILDPATH} diff --git a/hack/install_kubebuilder.sh b/hack/install_kubebuilder.sh new file mode 100755 index 000000000..6445fbe9e --- /dev/null +++ b/hack/install_kubebuilder.sh @@ -0,0 +1,88 @@ +#!/bin/sh + +# +# This file will be fetched as: curl -L https://git.io/getLatestKubebuilder | sh - +# so it should be pure bourne shell, not bash (and not reference other scripts) +# +# The script fetches the latest kubebuilder release candidate and untars it. +# It lets users to do curl -L https://git.io//getLatestKubebuilder | KUBEBUILDER_VERSION=1.0.5 sh - +# for instance to change the version fetched. + +# Check if the program is installed, otherwise exit +function command_exists () { + if ! [ -x "$(command -v $1)" ]; then + echo "Error: $1 program is not installed." >&2 + exit 1 + fi +} + +# Determine OS +OS="$(uname)" +case $OS in + Darwin) + OSEXT="darwin" + ;; + Linux) + OSEXT="linux" + ;; + *) + echo "Only OSX and Linux OS are supported !" + exit 1 + ;; +esac + +HW=$(uname -m) +case $HW in + x86_64) + ARCH=amd64 ;; + *) + echo "Only x86_64 machines are supported !" + exit 1 + ;; +esac + +# Check if curl, tar commands/programs exist +command_exists curl +command_exists tar + +if [ "x${KUBEBUILDER_VERSION}" = "x" ] ; then + KUBEBUILDER_VERSION=$(curl -L -s https://api.github.com/repos/kubernetes-sigs/kubebuilder/releases/latest | \ + grep tag_name | sed "s/ *\"tag_name\": *\"\\(.*\\)\",*/\\1/") + if [ -z "$KUBEBUILDER_VERSION" ]; then + echo "\nUnable to fetch the latest version tag. 
This may be due to network access problem" + exit 0 + fi +fi + +KUBEBUILDER_VERSION=${KUBEBUILDER_VERSION#"v"} +KUBEBUILDER_VERSION_NAME="kubebuilder_${KUBEBUILDER_VERSION}" +KUBEBUILDER_DIR=/usr/local/kubebuilder + +# Check if folder containing kubebuilder executable exists and is not empty +if [ -d "$KUBEBUILDER_DIR" ]; then + if [ "$(ls -A $KUBEBUILDER_DIR)" ]; then + echo "\n/usr/local/kubebuilder folder is not empty. Please delete or backup it before to install ${KUBEBUILDER_VERSION_NAME}" + exit 1 + fi +fi + +TMP_DIR=$(mktemp -d) +pushd $TMP_DIR + +# Downloading Kubebuilder compressed file using curl program +URL="https://github.com/kubernetes-sigs/kubebuilder/releases/download/v${KUBEBUILDER_VERSION}/${KUBEBUILDER_VERSION_NAME}_${OSEXT}_${ARCH}.tar.gz" +echo "Downloading ${KUBEBUILDER_VERSION_NAME}\nfrom $URL\n" +curl -L "$URL"| tar xz -C $TMP_DIR + +echo "Downloaded executable files" +ls "${KUBEBUILDER_VERSION_NAME}_${OSEXT}_${ARCH}/bin" + +echo "Moving files to $KUBEBUILDER_DIR folder\n" +mv ${KUBEBUILDER_VERSION_NAME}_${OSEXT}_${ARCH} kubebuilder && sudo mv -f kubebuilder /usr/local/ + +echo "Add kubebuilder to your path; e.g copy paste in your shell and/or edit your ~/.profile file" +echo "export PATH=\$PATH:/usr/local/kubebuilder/bin" +popd +rm -rf $TMP_DIR + +export PATH=$PATH:/usr/local/kubebuilder/bin diff --git a/pkg/apigateway/caddy-plugin/authenticate/authenticate.go b/pkg/apigateway/caddy-plugin/authenticate/authenticate.go index 2d00d6196..7bda950fe 100644 --- a/pkg/apigateway/caddy-plugin/authenticate/authenticate.go +++ b/pkg/apigateway/caddy-plugin/authenticate/authenticate.go @@ -20,6 +20,9 @@ package authenticate import ( "errors" "fmt" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/endpoints/request" "net/http" "strconv" "strings" @@ -46,6 +49,10 @@ type User struct { Extra *map[string]interface{} `json:"extra,omitempty"` } +var requestInfoFactory = request.RequestInfoFactory{ + APIPrefixes: sets.NewString("api", "apis"), + GrouplessAPIPrefixes: sets.NewString("api")} + func (h Auth) ServeHTTP(resp http.ResponseWriter, req *http.Request) (int, error) { for _, path := range h.Rule.ExceptedPath { if httpserver.Path(req.URL.Path).Matches(path) { @@ -91,10 +98,13 @@ func (h Auth) InjectContext(req *http.Request, token *jwt.Token) (*http.Request, } } + usr := &user.DefaultInfo{} + username, ok := payLoad["username"].(string) if ok && username != "" { req.Header.Set("X-Token-Username", username) + usr.Name = username } uid := payLoad["uid"] @@ -103,19 +113,38 @@ func (h Auth) InjectContext(req *http.Request, token *jwt.Token) (*http.Request, switch uid.(type) { case int: req.Header.Set("X-Token-UID", strconv.Itoa(uid.(int))) + usr.UID = strconv.Itoa(uid.(int)) break case string: req.Header.Set("X-Token-UID", uid.(string)) + usr.UID = uid.(string) break } } groups, ok := payLoad["groups"].([]string) - if ok && len(groups) > 0 { req.Header.Set("X-Token-Groups", strings.Join(groups, ",")) + usr.Groups = groups } + // hard code, support jenkins auth plugin + if httpserver.Path(req.URL.Path).Matches("/apis/jenkins.kubesphere.io") || httpserver.Path(req.URL.Path).Matches("job") { + req.SetBasicAuth(username, token.Raw) + } + + context := request.WithUser(req.Context(), usr) + + requestInfo, err := requestInfoFactory.NewRequestInfo(req) + + if err == nil { + context = request.WithRequestInfo(context, requestInfo) + } else { + return nil, err + } + + req = req.WithContext(context) + return req, nil } diff --git 
a/pkg/apigateway/caddy-plugin/authentication/authentication.go b/pkg/apigateway/caddy-plugin/authentication/authentication.go index 491502f58..fc46a8cc7 100644 --- a/pkg/apigateway/caddy-plugin/authentication/authentication.go +++ b/pkg/apigateway/caddy-plugin/authentication/authentication.go @@ -58,8 +58,9 @@ func (c Authentication) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, attrs, err := getAuthorizerAttributes(r.Context()) + // without authenticate, no requestInfo found in the context if err != nil { - return http.StatusInternalServerError, err + return c.Next.ServeHTTP(w, r) } permitted, err := permissionValidate(attrs) diff --git a/pkg/apis/addtoscheme_servicemesh_v1alpha2.go b/pkg/apis/addtoscheme_servicemesh_v1alpha2.go new file mode 100644 index 000000000..b6d708737 --- /dev/null +++ b/pkg/apis/addtoscheme_servicemesh_v1alpha2.go @@ -0,0 +1,35 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apis + +import ( + "github.com/knative/pkg/apis/istio/v1alpha3" + "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" + + "sigs.k8s.io/application/pkg/apis/app/v1beta1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1alpha2.SchemeBuilder.AddToScheme) + + // Register networking.istio.io/v1alpha3 + AddToSchemes = append(AddToSchemes, v1alpha3.SchemeBuilder.AddToScheme) + + // Register application scheme + AddToSchemes = append(AddToSchemes, v1beta1.SchemeBuilder.AddToScheme) +} diff --git a/pkg/apis/iam/v1alpha2/register.go b/pkg/apis/iam/v1alpha2/register.go index 0e3f485dc..47578d8e3 100644 --- a/pkg/apis/iam/v1alpha2/register.go +++ b/pkg/apis/iam/v1alpha2/register.go @@ -20,6 +20,7 @@ package v1alpha2 import ( "github.com/emicklei/go-restful" "github.com/emicklei/go-restful-openapi" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" "kubesphere.io/kubesphere/pkg/apiserver/iam" "kubesphere.io/kubesphere/pkg/apiserver/runtime" @@ -209,6 +210,37 @@ func addWebService(c *restful.Container) error { Doc("Delete user from workspace"). Metadata(restfulspec.KeyOpenAPITags, tags)) + tags = []string{"unstable"} + + ws.Route(ws.GET("/workspaces/{workspace}/namespaces"). + To(iam.UserNamespaceListHandler). + Doc("Get namespace list"). + Metadata(restfulspec.KeyOpenAPITags, tags). + Writes(models.PageableResponse{})) + ws.Route(ws.POST("/workspaces/{name}/namespaces"). + To(iam.NamespaceCreateHandler). + Doc("Create namespace"). + Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Writes(v1.Namespace{})) + ws.Route(ws.DELETE("/workspaces/{name}/namespaces/{namespace}").To(iam.NamespaceDeleteHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/workspaces/{name}/namespaces/{namespace}").To(iam.NamespaceCheckHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/namespaces/{namespace}").To(iam.NamespaceCheckHandler)) + + // TODO move to /apis/resources.kubesphere.io/workspaces/{workspace}/members/{username} + ws.Route(ws.GET("/workspaces/{workspace}/members/{username}/namespaces").To(iam.NamespacesListHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/workspaces/{name}/members/{username}/devops").To(iam.DevOpsProjectHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) + // TODO /workspaces/{name}/roles/{role} + ws.Route(ws.GET("/workspaces/{name}/roles/{role}").To(iam.WorkspaceRoles).Metadata(restfulspec.KeyOpenAPITags, tags)) + // TODO move to /apis/resources.kubesphere.io/devops + ws.Route(ws.GET("/workspaces/{name}/devops").To(iam.DevOpsProjectHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.POST("/workspaces/{name}/devops").To(iam.DevOpsProjectCreateHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.DELETE("/workspaces/{name}/devops/{id}").To(iam.DevOpsProjectDeleteHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) + + // TODO merge into /groups + ws.Route(ws.GET("/groups/count").To(iam.CountHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/users/{name}/namespaces").To(iam.NamespacesListHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) + c.Add(ws) return nil } diff --git a/pkg/apis/resources/v1alpha2/register.go b/pkg/apis/resources/v1alpha2/register.go index ec112c68f..56726c1ff 100644 --- a/pkg/apis/resources/v1alpha2/register.go +++ b/pkg/apis/resources/v1alpha2/register.go @@ -83,13 +83,29 @@ func addWebService(c *restful.Container) error { DataFormat("limit=%d,page=%d"). DefaultValue("limit=10,page=1")) + tags = []string{"Applications"} + + webservice.Route(webservice.GET("/applications"). + To(resources.ApplicationHandler). + Writes(models.PageableResponse{}). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("Cluster level resource query"). + Param(webservice.QueryParameter(params.ConditionsParam, "query conditions"). + Required(false). + DataFormat("key=value,key~value"). + DefaultValue("")). + Param(webservice.QueryParameter("cluster_id", "cluster id")). + Param(webservice.QueryParameter("runtime_id", "runtime id")). + Param(webservice.QueryParameter(params.PagingParam, "page"). + Required(false). + DataFormat("limit=%d,page=%d"). + DefaultValue("limit=10,page=1"))) + webservice.Route(webservice.GET("/storageclasses/{storageclass}/persistentvolumeclaims"). To(resources.GetPvcListBySc). Doc("get user's kubectl pod"). Param(webservice.PathParameter("username", "username")). Metadata(restfulspec.KeyOpenAPITags, tags)) - webservice.Route(webservice.GET("/namespaces/{namespace}/persistentvolumeclaims/{pvc}/pods"). - To(resources.GetPodListByPvc)) tags = []string{"User resources"} diff --git a/pkg/apis/servicemesh/group.go b/pkg/apis/servicemesh/group.go new file mode 100644 index 000000000..5e243b1fa --- /dev/null +++ b/pkg/apis/servicemesh/group.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package servicemesh contains servicemesh API versions +package servicemesh diff --git a/pkg/apis/servicemesh/metrics/install/install.go b/pkg/apis/servicemesh/metrics/install/install.go new file mode 100644 index 000000000..3e6563e53 --- /dev/null +++ b/pkg/apis/servicemesh/metrics/install/install.go @@ -0,0 +1,16 @@ +package install + +import ( + "github.com/emicklei/go-restful" + urlruntime "k8s.io/apimachinery/pkg/util/runtime" + "kubesphere.io/kubesphere/pkg/apis/servicemesh/metrics/v1alpha2" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" +) + +func init() { + Install(runtime.Container) +} + +func Install(c *restful.Container) { + urlruntime.Must(v1alpha2.AddToContainer(c)) +} diff --git a/pkg/apis/servicemesh/metrics/v1alpha2/register.go b/pkg/apis/servicemesh/metrics/v1alpha2/register.go new file mode 100644 index 000000000..8ffdc18aa --- /dev/null +++ b/pkg/apis/servicemesh/metrics/v1alpha2/register.go @@ -0,0 +1,131 @@ +package v1alpha2 + +import ( + "github.com/emicklei/go-restful" + "github.com/emicklei/go-restful-openapi" + "k8s.io/apimachinery/pkg/runtime/schema" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" + "kubesphere.io/kubesphere/pkg/apiserver/servicemesh/metrics" + "kubesphere.io/kubesphere/pkg/errors" +) + +const GroupName = "servicemesh.kubesphere.io" + +var GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} + +var ( + WebServiceBuilder = runtime.NewContainerBuilder(addWebService) + AddToContainer = WebServiceBuilder.AddToContainer +) + +func addWebService(c *restful.Container) error { + + tags := []string{"ServiceMesh"} + + webservice := runtime.NewWebService(GroupVersion) + + // Get service metrics + // GET /namespaces/{namespace}/services/{service}/metrics + webservice.Route(webservice.GET("/namespaces/{namespace}/services/{service}/metrics"). + To(metrics.GetServiceMetrics). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("Get service metrics from a specific namespace"). + Param(webservice.PathParameter("namespace", "name of the namespace")). + Param(webservice.PathParameter("service", "name of the service")). + Param(webservice.QueryParameter("filters[]", "type of metrics, e.g. request_count, request_duration, request_error_count")). + Param(webservice.QueryParameter("queryTime", "from which UNIX time to extract metrics")). + Param(webservice.QueryParameter("duration", "metrics duration, in seconds")). + Param(webservice.QueryParameter("step", "metrics step")). + Param(webservice.QueryParameter("rateInterval", "metrics rate intervals, e.g. 20s")). + Param(webservice.QueryParameter("quantiles[]", "metrics quantiles, 0.5, 0.9, 0.99")). + Param(webservice.QueryParameter("byLabels[]", "by which labels to group nodes, e.g. source_workload, destination_service_name")). + Param(webservice.QueryParameter("requestProtocol", "request protocol, http/tcp")). + Param(webservice.QueryParameter("reporter", "destination")). + Writes(errors.Error{})).Produces(restful.MIME_JSON) + + // Get app metrics + // GET /namespaces/{namespace}/apps/{app}/metrics + webservice.Route(webservice.GET("/namespaces/{namespace}/apps/{app}/metrics"). + To(metrics.GetAppMetrics). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("Get app metrics from a specific namespace"). + Param(webservice.PathParameter("namespace", "name of the namespace")). + Param(webservice.PathParameter("app", "name of the workload label app value")). + Param(webservice.QueryParameter("filters[]", "type of metrics, e.g. request_count, request_duration, request_error_count")). + Param(webservice.QueryParameter("queryTime", "from which UNIX time to extract metrics")). + Param(webservice.QueryParameter("duration", "metrics duration, in seconds")). + Param(webservice.QueryParameter("step", "metrics step")). + Param(webservice.QueryParameter("rateInterval", "metrics rate intervals, e.g. 20s")). + Param(webservice.QueryParameter("quantiles[]", "metrics quantiles, 0.5, 0.9, 0.99")). + Param(webservice.QueryParameter("byLabels[]", "by which labels to group nodes, e.g. source_workload, destination_service_name")). + Param(webservice.QueryParameter("requestProtocol", "request protocol, http/tcp")). + Param(webservice.QueryParameter("reporter", "destination")). + Writes(errors.Error{})).Produces(restful.MIME_JSON) + + // Get workload metrics + // GET /namespaces/{namespace}/workloads/{workload}/metrics + webservice.Route(webservice.GET("/namespaces/{namespace}/workloads/{workload}/metrics"). + To(metrics.GetWorkloadMetrics). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("Get workload metrics from a specific namespace"). + Param(webservice.PathParameter("namespace", "name of the namespace")). + Param(webservice.PathParameter("workload", "name of the workload")). + Param(webservice.QueryParameter("filters[]", "type of metrics, e.g. request_count, request_duration, request_error_count")). + Param(webservice.QueryParameter("queryTime", "from which UNIX time to extract metrics")). + Param(webservice.QueryParameter("duration", "metrics duration, in seconds")). + Param(webservice.QueryParameter("step", "metrics step")). + Param(webservice.QueryParameter("rateInterval", "metrics rate intervals, e.g. 20s")). + Param(webservice.QueryParameter("quantiles[]", "metrics quantiles, 0.5, 0.9, 0.99")). + Param(webservice.QueryParameter("byLabels[]", "by which labels to group nodes, e.g. source_workload, destination_service_name")). + Param(webservice.QueryParameter("requestProtocol", "request protocol, http/tcp")). + Param(webservice.QueryParameter("reporter", "destination")). + Writes(errors.Error{})).Produces(restful.MIME_JSON) + + // Get namespace metrics + // GET /namespaces/{namespace}/metrics + webservice.Route(webservice.GET("/namespaces/{namespace}/metrics"). + To(metrics.GetNamespaceMetrics). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("Get metrics from a specific namespace"). + Param(webservice.PathParameter("namespace", "name of the namespace")). + Param(webservice.QueryParameter("filters[]", "type of metrics, e.g. request_count, request_duration, request_error_count")). + Param(webservice.QueryParameter("queryTime", "from which UNIX time to extract metrics")). + Param(webservice.QueryParameter("duration", "metrics duration, in seconds")). + Param(webservice.QueryParameter("step", "metrics step")). + Param(webservice.QueryParameter("rateInterval", "metrics rate intervals, e.g. 20s")). + Param(webservice.QueryParameter("quantiles[]", "metrics quantiles, 0.5, 0.9, 0.99")). + Param(webservice.QueryParameter("byLabels[]", "by which labels to group nodes, e.g. source_workload, destination_service_name")).
+ Param(webservice.QueryParameter("requestProtocol", "request protocol, http/tcp")). + Param(webservice.QueryParameter("reporter", "destination")). + Writes(errors.Error{})).Produces(restful.MIME_JSON) + + // Get namespace graph + // GET /namespaces/{namespace}/graph + webservice.Route(webservice.GET("/namespaces/{namespace}/graph"). + To(metrics.GetNamespaceGraph). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("Get service graph for a specific namespace"). + Param(webservice.PathParameter("namespace", "name of a namespace")). + Param(webservice.QueryParameter("graphType", "type of the generated service graph")). + Param(webservice.QueryParameter("groupBy", "group nodes by kind")). + Param(webservice.QueryParameter("queryTime", "from which time point, default now")). + Param(webservice.QueryParameter("injectServiceNodes", "whether to inject service nodes")). + Writes(errors.Error{})).Produces(restful.MIME_JSON) + + // Get namespaces graph, for multiple namespaces + // GET /namespaces/graph + webservice.Route(webservice.GET("/namespaces/graph"). + To(metrics.GetNamespacesGraph). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("Get service graph for multiple namespaces"). + Param(webservice.QueryParameter("graphType", "type of the generated service graph")). + Param(webservice.QueryParameter("groupBy", "group nodes by kind")). + Param(webservice.QueryParameter("queryTime", "from which time point, default now")). + Param(webservice.QueryParameter("injectServiceNodes", "whether to inject service nodes")). + Param(webservice.QueryParameter("namespaces", "names of namespaces")). + Writes(errors.Error{})).Produces(restful.MIME_JSON) + + c.Add(webservice) + + return nil +} diff --git a/pkg/apis/servicemesh/v1alpha2/doc.go b/pkg/apis/servicemesh/v1alpha2/doc.go new file mode 100644 index 000000000..ac654f02a --- /dev/null +++ b/pkg/apis/servicemesh/v1alpha2/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha2 contains API Schema definitions for the servicemesh v1alpha2 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=kubesphere.io/kubesphere/pkg/apis/servicemesh +// +k8s:defaulter-gen=TypeMeta +// +groupName=servicemesh.kubesphere.io +package v1alpha2 diff --git a/pkg/apis/servicemesh/v1alpha2/register.go b/pkg/apis/servicemesh/v1alpha2/register.go new file mode 100644 index 000000000..88195f2df --- /dev/null +++ b/pkg/apis/servicemesh/v1alpha2/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: Boilerplate only. Ignore this file. + +// Package v1alpha2 contains API Schema definitions for the servicemesh v1alpha2 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=kubesphere.io/kubesphere/pkg/apis/servicemesh +// +k8s:defaulter-gen=TypeMeta +// +groupName=servicemesh.kubesphere.io +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "servicemesh.kubesphere.io", Version: "v1alpha2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is required by pkg/client/... + AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource is required by pkg/client/listers/... +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/pkg/apis/servicemesh/v1alpha2/strategy_types.go b/pkg/apis/servicemesh/v1alpha2/strategy_types.go new file mode 100644 index 000000000..41f90fd7c --- /dev/null +++ b/pkg/apis/servicemesh/v1alpha2/strategy_types.go @@ -0,0 +1,151 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "github.com/knative/pkg/apis/istio/v1alpha3" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +type StrategyType string + +const ( + + // Canary strategy type + CanaryType StrategyType = "Canary" + + // BlueGreen strategy type + BlueGreenType StrategyType = "BlueGreen" + + // Mirror strategy type + Mirror StrategyType = "Mirror" +) + +// StrategySpec defines the desired state of Strategy +type StrategySpec struct { + + // Strategy type + Type StrategyType `json:"type,omitempty"` + + // Label selector for virtual services. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty"` + + // Template describes the virtual service that will be created. 
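+ // The strategy controller renders this template into the VirtualService it creates and manages for the Strategy (see pkg/controller/strategy).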
+ Template VirtualServiceTemplateSpec `json:"template,omitempty"` + + // Indicates that the strategy is paused and will not be processed + // by the strategy controller. + Paused bool `json:"paused,omitempty"` +} + +// VirtualServiceTemplateSpec describes the virtual service that will be created from this template. +type VirtualServiceTemplateSpec struct { + + // Metadata of the virtual services created from this template + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec indicates the behavior of a virtual service. + // +optional + Spec v1alpha3.VirtualServiceSpec `json:"spec,omitempty"` +} + +// StrategyStatus defines the observed state of Strategy +type StrategyStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // The latest available observations of an object's current state. + // +optional + Conditions []StrategyCondition + + // Represents time when the strategy was acknowledged by the controller. + // It is represented in RFC3339 form and is in UTC. + // +optional + StartTime *metav1.Time + + // Represents time when the strategy was completed. + // It is represented in RFC3339 form and is in UTC. + // +optional + CompletionTime *metav1.Time +} + +type StrategyConditionType string + +// These are valid conditions of a strategy. +const ( + // StrategyComplete means the strategy has been delivered to Istio. + StrategyComplete StrategyConditionType = "Complete" + + // StrategyFailed means the strategy has failed its delivery to Istio. + StrategyFailed StrategyConditionType = "Failed" +) + +// StrategyCondition describes the current state of a strategy. +type StrategyCondition struct { + // Type of strategy condition, Complete or Failed. + Type StrategyConditionType + + // Status of the condition, one of True, False, Unknown. + Status apiextensions.ConditionStatus + + // Last time the condition was checked. + // +optional + LastProbeTime metav1.Time + + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + + // Reason for the condition's last transition. + Reason string + + // Human-readable message indicating details about the last transition. + // +optional + Message string +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Strategy is the Schema for the strategies API +// +k8s:openapi-gen=true +type Strategy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec StrategySpec `json:"spec,omitempty"` + Status StrategyStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StrategyList contains a list of Strategy +type StrategyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Strategy `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Strategy{}, &StrategyList{}) +} diff --git a/pkg/apis/servicemesh/v1alpha2/strategy_types_test.go b/pkg/apis/servicemesh/v1alpha2/strategy_types_test.go new file mode 100644 index 000000000..eb728fb0c --- /dev/null +++ b/pkg/apis/servicemesh/v1alpha2/strategy_types_test.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "github.com/knative/pkg/apis/istio/v1alpha3" + "io/ioutil" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/client-go/kubernetes/scheme" + "testing" + + "github.com/onsi/gomega" + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func TestStorageStrategy(t *testing.T) { + key := types.NamespacedName{ + Name: "foo", + Namespace: "default", + } + created := &Strategy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + Spec: StrategySpec{ + Template: VirtualServiceTemplateSpec{ + Spec: v1alpha3.VirtualServiceSpec{ + Hosts: []string{ + "details", + }, + }, + }, + }, + } + g := gomega.NewGomegaWithT(t) + + // Test Create + fetched := &Strategy{} + g.Expect(c.Create(context.TODO(), created)).NotTo(gomega.HaveOccurred()) + + g.Expect(c.Get(context.TODO(), key, fetched)).NotTo(gomega.HaveOccurred()) + g.Expect(fetched).To(gomega.Equal(created)) + + // Test Updating the Labels + updated := fetched.DeepCopy() + updated.Labels = map[string]string{"hello": "world"} + g.Expect(c.Update(context.TODO(), updated)).NotTo(gomega.HaveOccurred()) + + g.Expect(c.Get(context.TODO(), key, fetched)).NotTo(gomega.HaveOccurred()) + g.Expect(fetched).To(gomega.Equal(updated)) + + // Test Delete + g.Expect(c.Delete(context.TODO(), fetched)).NotTo(gomega.HaveOccurred()) + g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.HaveOccurred()) +} + +func TestStrategyRead(t *testing.T) { + + //g := gomega.NewGomegaWithT(t) + + var str []byte + file, err := ioutil.ReadFile("/Users/zry/go/src/kubesphere.io/kubesphere/config/samples/servicemesh_v1alpha2_strategy.yaml") + if err == nil { + obj, _, _ := scheme.Codecs.UniversalDeserializer().Decode(file, nil, &Strategy{}) + switch obj.(type) { + case *Strategy: + str, err = json.Marshal(obj) + t.Logf("Read strategy %s", str) + } + } +} diff --git a/pkg/apis/servicemesh/v1alpha2/v1alpha2_suite_test.go b/pkg/apis/servicemesh/v1alpha2/v1alpha2_suite_test.go new file mode 100644 index 000000000..17192bd5f --- /dev/null +++ b/pkg/apis/servicemesh/v1alpha2/v1alpha2_suite_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha2 + +import ( + "log" + "os" + "path/filepath" + "testing" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" +) + +var cfg *rest.Config +var c client.Client + +func TestMain(m *testing.M) { + t := &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "crds")}, + } + + err := SchemeBuilder.AddToScheme(scheme.Scheme) + if err != nil { + log.Fatal(err) + } + + if cfg, err = t.Start(); err != nil { + log.Fatal(err) + } + + if c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}); err != nil { + log.Fatal(err) + } + + code := m.Run() + t.Stop() + os.Exit(code) +} diff --git a/pkg/apis/servicemesh/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/servicemesh/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 000000000..02ce246fe --- /dev/null +++ b/pkg/apis/servicemesh/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,176 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Strategy) DeepCopyInto(out *Strategy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Strategy. +func (in *Strategy) DeepCopy() *Strategy { + if in == nil { + return nil + } + out := new(Strategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Strategy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrategyCondition) DeepCopyInto(out *StrategyCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyCondition. +func (in *StrategyCondition) DeepCopy() *StrategyCondition { + if in == nil { + return nil + } + out := new(StrategyCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StrategyList) DeepCopyInto(out *StrategyList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Strategy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList. +func (in *StrategyList) DeepCopy() *StrategyList { + if in == nil { + return nil + } + out := new(StrategyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StrategyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrategySpec) DeepCopyInto(out *StrategySpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategySpec. +func (in *StrategySpec) DeepCopy() *StrategySpec { + if in == nil { + return nil + } + out := new(StrategySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrategyStatus) DeepCopyInto(out *StrategyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StrategyCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyStatus. +func (in *StrategyStatus) DeepCopy() *StrategyStatus { + if in == nil { + return nil + } + out := new(StrategyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceTemplateSpec) DeepCopyInto(out *VirtualServiceTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceTemplateSpec. 
+func (in *VirtualServiceTemplateSpec) DeepCopy() *VirtualServiceTemplateSpec { + if in == nil { + return nil + } + out := new(VirtualServiceTemplateSpec) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/apiserver/iam/users.go b/pkg/apiserver/iam/users.go index a3131ffdc..c9ed8e3aa 100644 --- a/pkg/apiserver/iam/users.go +++ b/pkg/apiserver/iam/users.go @@ -66,6 +66,10 @@ func CreateUser(req *restful.Request, resp *restful.Response) { err = iam.CreateUser(user) if err != nil { + if ldap.IsErrorWithCode(err, ldap.LDAPResultEntryAlreadyExists) { + resp.WriteHeaderAndEntity(http.StatusConflict, errors.Wrap(err)) + return + } resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } @@ -180,7 +184,7 @@ func CurrentUserDetail(req *restful.Request, resp *restful.Response) { if err != nil { if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { - resp.WriteHeaderAndEntity(http.StatusForbidden, errors.Wrap(err)) + resp.WriteHeaderAndEntity(http.StatusNotFound, errors.Wrap(err)) } else { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) } @@ -228,6 +232,12 @@ func NamespacesListHandler(req *restful.Request, resp *restful.Response) { func UserDetail(req *restful.Request, resp *restful.Response) { username := req.PathParameter("name") + usernameFromHeader := req.HeaderParameter(constants.UserNameHeader) + + if username == usernameFromHeader { + CurrentUserDetail(req, resp) + return + } conn, err := ldapclient.Client() diff --git a/pkg/apiserver/iam/workspaces.go b/pkg/apiserver/iam/workspaces.go index 033757bbc..0229f1b42 100644 --- a/pkg/apiserver/iam/workspaces.go +++ b/pkg/apiserver/iam/workspaces.go @@ -29,8 +29,6 @@ import ( "github.com/emicklei/go-restful" "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" - apierror "k8s.io/apimachinery/pkg/api/errors" - "kubesphere.io/kubesphere/pkg/constants" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models" @@ -64,20 +62,6 @@ func WorkspaceRoles(req *restful.Request, resp *restful.Response) { resp.WriteAsJson(roles) } -func WorkspaceMemberQuery(req *restful.Request, resp *restful.Response) { - workspace := req.PathParameter("name") - keyword := req.QueryParameter("keyword") - - users, err := workspaces.GetWorkspaceMembers(workspace, keyword) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(users) -} - func WorkspaceMemberDetail(req *restful.Request, resp *restful.Response) { workspace := req.PathParameter("name") username := req.PathParameter("member") @@ -559,26 +543,10 @@ func DevopsRulesHandler(req *restful.Request, resp *restful.Response) { } func NamespacesRulesHandler(req *restful.Request, resp *restful.Response) { - workspaceName := req.PathParameter("workspace") + //workspaceName := req.PathParameter("workspace") username := req.HeaderParameter(constants.UserNameHeader) namespaceName := req.PathParameter("namespace") - namespace, err := iam.GetNamespace(namespaceName) - - if err != nil { - if apierror.IsNotFound(err) { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("permission undefined"))) - } else { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) - } - return - } - - if namespace.Labels == nil || namespace.Labels["kubesphere.io/workspace"] != workspaceName { - resp.WriteHeaderAndEntity(http.StatusForbidden, errors.Wrap(fmt.Errorf("permission undefined"))) - return - } - clusterRoles, err := iam.GetClusterRoles(username) if err != nil 
{ diff --git a/pkg/apiserver/resources/application.go b/pkg/apiserver/resources/application.go new file mode 100644 index 000000000..6a921f16b --- /dev/null +++ b/pkg/apiserver/resources/application.go @@ -0,0 +1,56 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package resources + +import ( + "github.com/emicklei/go-restful" + "kubesphere.io/kubesphere/pkg/errors" + "kubesphere.io/kubesphere/pkg/models/applications" + "kubesphere.io/kubesphere/pkg/params" + "net/http" +) + +func ApplicationHandler(req *restful.Request, resp *restful.Response) { + limit, offset := params.ParsePaging(req) + clusterId := req.QueryParameter("cluster_id") + runtimeId := req.QueryParameter("runtime_id") + conditions, err := params.ParseConditions(req) + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + if len(clusterId) > 0 { + app, err := applications.GetApp(clusterId) + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + resp.WriteEntity(app) + return + } + + result, err := applications.ListApplication(runtimeId, conditions, limit, offset) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(result) + +} diff --git a/pkg/apiserver/servicemesh/metrics/handlers.go b/pkg/apiserver/servicemesh/metrics/handlers.go new file mode 100644 index 000000000..8d5dce012 --- /dev/null +++ b/pkg/apiserver/servicemesh/metrics/handlers.go @@ -0,0 +1,43 @@ +package metrics + +import ( + "fmt" + "github.com/emicklei/go-restful" + "github.com/kiali/kiali/handlers" +) + +// Get app metrics +func GetAppMetrics(request *restful.Request, response *restful.Response) { + handlers.AppMetrics(response.ResponseWriter, request.Request) +} + +// Get workload metrics +func GetWorkloadMetrics(request *restful.Request, response *restful.Response) { + handlers.WorkloadMetrics(response.ResponseWriter, request.Request) +} + +// Get service metrics +func GetServiceMetrics(request *restful.Request, response *restful.Response) { + handlers.ServiceMetrics(response.ResponseWriter, request.Request) +} + +// Get namespace metrics +func GetNamespaceMetrics(request *restful.Request, response *restful.Response) { + handlers.NamespaceMetrics(response.ResponseWriter, request.Request) +} + +// Get service graph for a single namespace +func GetNamespaceGraph(request *restful.Request, response *restful.Response) { + namespace := request.PathParameter("namespace") + + if len(namespace) > 0 { + request.Request.URL.RawQuery = fmt.Sprintf("%s&namespaces=%s", request.Request.URL.RawQuery, namespace) + } + + handlers.GraphNamespaces(response.ResponseWriter, request.Request) +} + +// Get service graph for multiple namespaces +func GetNamespacesGraph(request *restful.Request, response *restful.Response) { + handlers.GraphNamespaces(response.ResponseWriter, request.Request) +} diff --git a/pkg/constants/constants.go
b/pkg/constants/constants.go index eb8f529b0..40b7d54b4 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -39,18 +39,12 @@ const ( DevopsOwner = "owner" DevopsReporter = "reporter" - envDevopsAPIServer = "DEVOPS_API_SERVER" - envAccountAPIServer = "ACCOUNT_API_SERVER" - envDevopsProxyToken = "DEVOPS_PROXY_TOKEN" - envOpenPitrixProxyToken = "OPENPITRIX_PROXY_TOKEN" - UserNameHeader = "X-Token-Username" ) var ( WorkSpaceRoles = []string{WorkspaceAdmin, WorkspaceRegular, WorkspaceViewer} SystemWorkspace = "system-workspace" - DevopsAPIServer = "ks-devops-apiserver.kubesphere-system.svc" - AccountAPIServer = "ks-account.kubesphere-system.svc" + DevopsAPIServer = "ks-devops.kubesphere-devops-system.svc" SystemNamespaces = []string{KubeSphereNamespace, OpenPitrixNamespace, KubeSystemNamespace} ) diff --git a/pkg/controller/add_strategy.go b/pkg/controller/add_strategy.go new file mode 100644 index 000000000..29e97c529 --- /dev/null +++ b/pkg/controller/add_strategy.go @@ -0,0 +1,30 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "kubesphere.io/kubesphere/pkg/controller/strategy" +) + +func init() { + // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. + AddToManagerFuncs = append(AddToManagerFuncs, strategy.Add) + + // Add application to manager functions + //AddToManagerFuncs = append(AddToManagerFuncs, application.Add) + +} diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 6af88cab2..80e89c7a4 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2019 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/controller/strategy/strategy_controller.go b/pkg/controller/strategy/strategy_controller.go new file mode 100644 index 000000000..ce506ef79 --- /dev/null +++ b/pkg/controller/strategy/strategy_controller.go @@ -0,0 +1,148 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategy + +import ( + "context" + "github.com/knative/pkg/apis/istio/v1alpha3" + "reflect" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var log = logf.Log.WithName("controller") + +// Add creates a new Strategy Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller +// and Start it when the Manager is Started. +func Add(mgr manager.Manager) error { + return add(mgr, newReconciler(mgr)) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager) reconcile.Reconciler { + return &ReconcileStrategy{Client: mgr.GetClient(), scheme: mgr.GetScheme()} +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r reconcile.Reconciler) error { + // Create a new controller + c, err := controller.New("strategy-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + // Watch for changes to Strategy + err = c.Watch(&source.Kind{Type: &servicemeshv1alpha2.Strategy{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return err + } + err = c.Watch(&source.Kind{Type: &v1alpha3.VirtualService{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return err + } + + // TODO(user): Modify this to be the types you create + // Watch a VirtualService created by Strategy + err = c.Watch(&source.Kind{Type: &v1alpha3.VirtualService{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: &servicemeshv1alpha2.Strategy{}, + }) + if err != nil { + return err + } + + return nil +} + +var _ reconcile.Reconciler = &ReconcileStrategy{} + +// ReconcileStrategy reconciles a Strategy object +type ReconcileStrategy struct { + client.Client + scheme *runtime.Scheme +} + +// Reconcile reads that state of the cluster for a Strategy object and makes changes based on the state read +// and what is in the Strategy.Spec +// a Deployment as an example +// Automatically generate RBAC rules to allow the Controller to read and write Deployments +// +kubebuilder:rbac:groups=networking.istio.io,resources=virtualservices,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.istio.io,resources=virtualservices/status,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=servicemesh.kubesphere.io,resources=strategies,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=servicemesh.kubesphere.io,resources=strategies/status,verbs=get;update;patch +func (r *ReconcileStrategy) Reconcile(request reconcile.Request) (reconcile.Result, error) { + // Fetch the Strategy instance + strategy := &servicemeshv1alpha2.Strategy{} + err := r.Get(context.TODO(), request.NamespacedName, strategy) + if err != nil { + if errors.IsNotFound(err) { + // Object not found, return. Created objects are automatically garbage collected. + // For additional cleanup logic use finalizers. 
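+ // No explicit cleanup is needed here: the VirtualService created below carries an owner reference to the Strategy (set via SetControllerReference), so it is garbage collected along with it.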
+ return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + return reconcile.Result{}, err + } + + // Define VirtualService to be created + vs := &v1alpha3.VirtualService{ + ObjectMeta: metav1.ObjectMeta{ + Name: strategy.Name + "-virtualservice", + Namespace: strategy.Namespace, + Labels: strategy.Spec.Selector.MatchLabels, + }, + Spec: strategy.Spec.Template.Spec, + } + + if err := controllerutil.SetControllerReference(strategy, vs, r.scheme); err != nil { + return reconcile.Result{}, err + } + + // Check if the VirtualService already exists + found := &v1alpha3.VirtualService{} + err = r.Get(context.TODO(), types.NamespacedName{Name: vs.Name, Namespace: vs.Namespace}, found) + if err != nil && errors.IsNotFound(err) { + log.Info("Creating VirtualService", "namespace", vs.Namespace, "name", vs.Name) + err = r.Create(context.TODO(), vs) + return reconcile.Result{}, err + } else if err != nil { + return reconcile.Result{}, err + } + + // Update the found object and write the result back if there are any changes + if !reflect.DeepEqual(vs.Spec, found.Spec) { + found.Spec = vs.Spec + log.Info("Updating VirtualService", "namespace", vs.Namespace, "name", vs.Name) + err = r.Update(context.TODO(), found) + if err != nil { + return reconcile.Result{}, err + } + } + return reconcile.Result{}, nil +} diff --git a/pkg/controller/strategy/strategy_controller_suite_test.go b/pkg/controller/strategy/strategy_controller_suite_test.go new file mode 100644 index 000000000..546e3b9a0 --- /dev/null +++ b/pkg/controller/strategy/strategy_controller_suite_test.go @@ -0,0 +1,75 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategy + +import ( + stdlog "log" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "kubesphere.io/kubesphere/pkg/apis" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var cfg *rest.Config + +func TestMain(m *testing.M) { + t := &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")}, + } + apis.AddToScheme(scheme.Scheme) + + var err error + if cfg, err = t.Start(); err != nil { + stdlog.Fatal(err) + } + + code := m.Run() + t.Stop() + os.Exit(code) +} + +// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and +// writes the request to requests after Reconcile is finished. 
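+// Tests block on the returned channel to observe that a reconcile was triggered for the expected request.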
+func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) { + requests := make(chan reconcile.Request) + fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { + result, err := inner.Reconcile(req) + requests <- req + return result, err + }) + return fn, requests +} + +// StartTestManager adds recFn +func StartTestManager(mgr manager.Manager, g *gomega.GomegaWithT) (chan struct{}, *sync.WaitGroup) { + stop := make(chan struct{}) + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + g.Expect(mgr.Start(stop)).NotTo(gomega.HaveOccurred()) + }() + return stop, wg +} diff --git a/pkg/controller/strategy/strategy_controller_test.go b/pkg/controller/strategy/strategy_controller_test.go new file mode 100644 index 000000000..dd4b2f714 --- /dev/null +++ b/pkg/controller/strategy/strategy_controller_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategy + +import ( + "github.com/knative/pkg/apis/istio/common/v1alpha1" + "github.com/knative/pkg/apis/istio/v1alpha3" + "k8s.io/apimachinery/pkg/util/json" + "testing" + "time" + + "github.com/onsi/gomega" + "golang.org/x/net/context" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var c client.Client + +var expectedRequest = reconcile.Request{NamespacedName: types.NamespacedName{Name: "foo", Namespace: "default"}} +var depKey = types.NamespacedName{Name: "foo-virtualservice", Namespace: "default"} + +const timeout = time.Second * 5 + +func TestReconcile(t *testing.T) { + g := gomega.NewGomegaWithT(t) + instance := &servicemeshv1alpha2.Strategy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + Spec: servicemeshv1alpha2.StrategySpec{ + Type: servicemeshv1alpha2.CanaryType, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "type": "Canary", + }, + }, + Template: servicemeshv1alpha2.VirtualServiceTemplateSpec{ + Spec: v1alpha3.VirtualServiceSpec{ + Hosts: []string{"details"}, + Gateways: []string{"default"}, + Http: []v1alpha3.HTTPRoute{ + { + Match: []v1alpha3.HTTPMatchRequest{ + { + Method: &v1alpha1.StringMatch{ + Exact: "POST", + }, + }, + }, + Route: []v1alpha3.DestinationWeight{ + { + Destination: v1alpha3.Destination{ + Host: "details", + Subset: "v1", + }, + Weight: 60, + }, + }, + }, + { + Route: []v1alpha3.DestinationWeight{ + { + Destination: v1alpha3.Destination{ + Host: "details", + Subset: "v2", + }, + Weight: 40, + }, + }, + }, + }, + }, + }, + }, + } + + // Setup the Manager and Controller. Wrap the Controller Reconcile function so it writes each request to a + // channel when it is finished. 
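+ // The manager runs against the envtest control plane started in TestMain, so the controller is exercised end to end.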
+ mgr, err := manager.New(cfg, manager.Options{}) + g.Expect(err).NotTo(gomega.HaveOccurred()) + c = mgr.GetClient() + + recFn, requests := SetupTestReconcile(newReconciler(mgr)) + g.Expect(add(mgr, recFn)).NotTo(gomega.HaveOccurred()) + + stopMgr, mgrStopped := StartTestManager(mgr, g) + + defer func() { + close(stopMgr) + mgrStopped.Wait() + }() + + // Create the Strategy object and expect the Reconcile and Deployment to be created + err = c.Create(context.TODO(), instance) + // The instance object may not be a valid object because it might be missing some required fields. + // Please modify the instance object by adding required fields and then remove the following if statement. + if apierrors.IsInvalid(err) { + t.Logf("failed to create object, got an invalid object error: %v", err) + return + } + g.Expect(err).NotTo(gomega.HaveOccurred()) + defer c.Delete(context.TODO(), instance) + g.Eventually(requests, timeout).Should(gomega.Receive(gomega.Equal(expectedRequest))) + + vs := &v1alpha3.VirtualService{} + g.Eventually(func() error { return c.Get(context.TODO(), depKey, vs) }, timeout). + Should(gomega.Succeed()) + + if str, err := json.Marshal(vs); err == nil { + t.Logf("Created virtual service %s\n", str) + } + + // Delete the Deployment and expect Reconcile to be called for Deployment deletion + g.Expect(c.Delete(context.TODO(), vs)).NotTo(gomega.HaveOccurred()) + g.Eventually(requests, timeout).Should(gomega.Receive(gomega.Equal(expectedRequest))) + //g.Eventually(func() error { return c.Get(context.TODO(), depKey, vs) }, timeout).Should(gomega.Succeed()) + + // Manually delete Deployment since GC isn't enabled in the test control plane + g.Eventually(func() error { return c.Delete(context.TODO(), vs) }, timeout). + Should(gomega.MatchError("virtualservices.networking.istio.io \"foo-virtualservice\" not found")) + +} diff --git a/pkg/models/applications/applications.go b/pkg/models/applications/applications.go new file mode 100644 index 000000000..01e1384b1 --- /dev/null +++ b/pkg/models/applications/applications.go @@ -0,0 +1,537 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package applications + +import ( + "encoding/json" + "fmt" + "github.com/golang/glog" + "io/ioutil" + v12 "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/models" + "kubesphere.io/kubesphere/pkg/models/resources" + "kubesphere.io/kubesphere/pkg/params" + "kubesphere.io/kubesphere/pkg/simple/client/k8s" + "net/http" + "strconv" + "strings" + "time" +) + +var ( + OpenPitrixProxyToken string + OpenPitrixServer string +) + +const ( + unknown = "-" + deploySuffix = "-Deployment" + daemonSuffix = "-DaemonSet" + stateSuffix = "-StatefulSet" +) + +type Application struct { + Name string `json:"name"` + RepoName string `json:"repoName"` + Runtime string `json:"namespace"` + RuntimeId string `json:"runtime_id"` + Version string `json:"version"` + VersionId string `json:"version_id"` + Status string `json:"status"` + UpdateTime time.Time `json:"updateTime"` + CreateTime time.Time `json:"createTime"` + App string `json:"app"` + AppId string `json:"app_id"` + Description string `json:"description,omitempty"` + WorkLoads *workLoads `json:"workloads,omitempty"` + Services []v1.Service `json:"services,omitempty"` + Ingresses []v1beta1.Ingress `json:"ingresses,omitempty"` + ClusterID string `json:"cluster_id"` +} + +type clusterRole struct { + ClusterID string `json:"cluster_id"` + Role string `json:"role"` +} + +type cluster struct { + ClusterID string `json:"cluster_id"` + Name string `json:"name"` + AppID string `json:"app_id"` + VersionID string `json:"version_id"` + Status string `json:"status"` + UpdateTime time.Time `json:"status_time"` + CreateTime time.Time `json:"create_time"` + RunTimeId string `json:"runtime_id"` + Description string `json:"description"` + ClusterRoleSets []clusterRole `json:"cluster_role_set"` +} + +type clusters struct { + Total int `json:"total_count"` + Clusters []cluster `json:"cluster_set"` +} + +type versionList struct { + Total int `json:"total_count"` + Versions []version `json:"app_version_set"` +} + +type version struct { + Name string `json:"name"` + VersionID string `json:"version_id"` +} + +type runtime struct { + RuntimeID string `json:"runtime_id"` + Zone string `json:"zone"` +} + +type runtimeList struct { + Total int `json:"total_count"` + Runtimes []runtime `json:"runtime_set"` +} + +type app struct { + AppId string `json:"app_id"` + Name string `json:"name"` + ChartName string `json:"chart_name"` + RepoId string `json:"repo_id"` +} + +type repo struct { + RepoId string `json:"repo_id"` + Name string `json:"name"` + Url string `json:"url"` +} + +type workLoads struct { + Deployments []v12.Deployment `json:"deployments,omitempty"` + Statefulsets []v12.StatefulSet `json:"statefulsets,omitempty"` + Daemonsets []v12.DaemonSet `json:"daemonsets,omitempty"` +} + +type appList struct { + Total int `json:"total_count"` + Apps []app `json:"app_set"` +} + +type repoList struct { + Total int `json:"total_count"` + Repos []repo `json:"repo_set"` +} + +func GetAppInfo(appId string) (string, string, string, error) { + url := fmt.Sprintf("%s/v1/apps?app_id=%s", OpenPitrixServer, appId) + resp, err := makeHttpRequest("GET", url, "") + if err != nil { + glog.Error(err) + return unknown, unknown, unknown, err + } + + var apps appList + err = json.Unmarshal(resp, &apps) + if err != nil { + glog.Error(err) + return unknown, unknown, unknown, err + } 
+ + if len(apps.Apps) == 0 { + return unknown, unknown, unknown, err + } + + return apps.Apps[0].ChartName, apps.Apps[0].RepoId, apps.Apps[0].AppId, nil +} + +func GetRepo(repoId string) (string, error) { + url := fmt.Sprintf("%s/v1/repos?repo_id=%s", OpenPitrixServer, repoId) + resp, err := makeHttpRequest("GET", url, "") + if err != nil { + glog.Error(err) + return unknown, err + } + + var repos repoList + err = json.Unmarshal(resp, &repos) + if err != nil { + glog.Error(err) + return unknown, err + } + + if len(repos.Repos) == 0 { + return unknown, err + } + + return repos.Repos[0].Name, nil +} + +func GetVersion(versionId string) (string, error) { + versionUrl := fmt.Sprintf("%s/v1/app_versions?version_id=%s", OpenPitrixServer, versionId) + resp, err := makeHttpRequest("GET", versionUrl, "") + if err != nil { + glog.Error(err) + return unknown, err + } + + var versions versionList + err = json.Unmarshal(resp, &versions) + if err != nil { + glog.Error(err) + return unknown, err + } + + if len(versions.Versions) == 0 { + return unknown, nil + } + return versions.Versions[0].Name, nil +} + +func GetRuntime(runtimeId string) (string, error) { + + versionUrl := fmt.Sprintf("%s/v1/runtimes?runtime_id=%s", OpenPitrixServer, runtimeId) + resp, err := makeHttpRequest("GET", versionUrl, "") + if err != nil { + glog.Error(err) + return unknown, err + } + + var runtimes runtimeList + err = json.Unmarshal(resp, &runtimes) + if err != nil { + glog.Error(err) + return unknown, err + } + + if len(runtimes.Runtimes) == 0 { + return unknown, nil + } + + return runtimes.Runtimes[0].Zone, nil +} + +func GetWorkLoads(namespace string, clusterRoles []clusterRole) (*workLoads, error) { + + var works workLoads + for _, clusterRole := range clusterRoles { + workLoadName := clusterRole.Role + if len(workLoadName) > 0 { + if strings.HasSuffix(workLoadName, deploySuffix) { + name := strings.Split(workLoadName, deploySuffix)[0] + + item, err := informers.SharedInformerFactory().Apps().V1().Deployments().Lister().Deployments(namespace).Get(name) + + if err != nil { + return nil, err + } + + works.Deployments = append(works.Deployments, *item) + continue + } + + if strings.HasSuffix(workLoadName, daemonSuffix) { + name := strings.Split(workLoadName, daemonSuffix)[0] + item, err := informers.SharedInformerFactory().Apps().V1().DaemonSets().Lister().DaemonSets(namespace).Get(name) + if err != nil { + return nil, err + } + works.Daemonsets = append(works.Daemonsets, *item) + continue + } + + if strings.HasSuffix(workLoadName, stateSuffix) { + name := strings.Split(workLoadName, stateSuffix)[0] + item, err := informers.SharedInformerFactory().Apps().V1().StatefulSets().Lister().StatefulSets(namespace).Get(name) + if err != nil { + return nil, err + } + works.Statefulsets = append(works.Statefulsets, *item) + continue + } + } + } + return &works, nil +} + +func getLabels(namespace string, workloads *workLoads) *[]map[string]string { + k8sClient := k8s.Client() + + var workloadLables []map[string]string + if workloads == nil { + return nil + } + + for _, workload := range workloads.Deployments { + deploy, err := k8sClient.AppsV1().Deployments(namespace).Get(workload.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + continue + } + workloadLables = append(workloadLables, deploy.Labels) + } + + for _, workload := range workloads.Daemonsets { + daemonset, err := k8sClient.AppsV1().DaemonSets(namespace).Get(workload.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + continue + } + workloadLables = 
append(workloadLables, daemonset.Labels) + } + + for _, workload := range workloads.Statefulsets { + statefulset, err := k8sClient.AppsV1().StatefulSets(namespace).Get(workload.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + continue + } + workloadLables = append(workloadLables, statefulset.Labels) + } + + return &workloadLables +} + +func isExist(svcs []v1.Service, svc v1.Service) bool { + for _, item := range svcs { + if item.Name == svc.Name && item.Namespace == svc.Namespace { + return true + } + } + return false +} + +func getSvcs(namespace string, workLoadLabels *[]map[string]string) []v1.Service { + if len(*workLoadLabels) == 0 { + return nil + } + k8sClient := k8s.Client() + var services []v1.Service + for _, label := range *workLoadLabels { + labelSelector := labels.Set(label).AsSelector().String() + svcs, err := k8sClient.CoreV1().Services(namespace).List(metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + glog.Errorf("get app's svc failed, reason: %v", err) + } + for _, item := range svcs.Items { + if !isExist(services, item) { + services = append(services, item) + } + } + } + + return services +} + +func getIng(namespace string, services []v1.Service) []v1beta1.Ingress { + if services == nil { + return nil + } + + var ings []v1beta1.Ingress + for _, svc := range services { + result, err := resources.ListNamespaceResource(namespace, "ingress", ¶ms.Conditions{Fuzzy: map[string]string{"serviceName": svc.Name}}, "", false, -1, 0) + if err != nil { + glog.Error(err) + return nil + } + + glog.Error(result) + for _, i := range result.Items { + ingress := i.(*v1beta1.Ingress) + + exist := false + var tmpRules []v1beta1.IngressRule + for _, rule := range ingress.Spec.Rules { + for _, p := range rule.HTTP.Paths { + if p.Backend.ServiceName == svc.Name { + exist = true + tmpRules = append(tmpRules, rule) + } + } + + } + + if exist { + ing := v1beta1.Ingress{} + ing.Name = ingress.Name + ing.Spec.Rules = tmpRules + ings = append(ings, ing) + } + } + } + + return ings +} + +func ListApplication(runtimeId string, conditions *params.Conditions, limit, offset int) (*models.PageableResponse, error) { + if strings.HasSuffix(OpenPitrixServer, "/") { + OpenPitrixServer = strings.TrimSuffix(OpenPitrixServer, "/") + } + + defaultStatus := "status=active&status=stopped&status=pending&status=ceased" + + url := fmt.Sprintf("%s/v1/clusters?limit=%s&offset=%s", OpenPitrixServer, strconv.Itoa(limit), strconv.Itoa(offset)) + + if len(conditions.Fuzzy["name"]) > 0 { + url = fmt.Sprintf("%s&search_word=%s", url, conditions.Fuzzy["name"]) + } + + if len(conditions.Match["status"]) > 0 { + url = fmt.Sprintf("%s&status=%s", url, conditions.Match["status"]) + } else { + url = fmt.Sprintf("%s&%s", url, defaultStatus) + } + + if len(runtimeId) > 0 { + url = fmt.Sprintf("%s&runtime_id=%s", url, runtimeId) + } + + resp, err := makeHttpRequest("GET", url, "") + if err != nil { + glog.Errorf("request %s failed, reason: %s", url, err) + return nil, err + } + + var clusterList clusters + err = json.Unmarshal(resp, &clusterList) + + if err != nil { + return nil, err + } + + result := models.PageableResponse{TotalCount: clusterList.Total} + result.Items = make([]interface{}, 0) + for _, item := range clusterList.Clusters { + var app Application + + app.Name = item.Name + app.ClusterID = item.ClusterID + app.UpdateTime = item.UpdateTime + app.Status = item.Status + versionInfo, _ := GetVersion(item.VersionID) + app.Version = versionInfo + app.VersionId = item.VersionID + runtimeInfo, _ := 
GetRuntime(item.RunTimeId) + app.Runtime = runtimeInfo + app.RuntimeId = item.RunTimeId + appInfo, _, appId, _ := GetAppInfo(item.AppID) + app.App = appInfo + app.AppId = appId + app.Description = item.Description + + result.Items = append(result.Items, app) + } + + return &result, nil +} + +func GetApp(clusterId string) (*Application, error) { + if strings.HasSuffix(OpenPitrixServer, "/") { + OpenPitrixServer = strings.TrimSuffix(OpenPitrixServer, "/") + } + + url := fmt.Sprintf("%s/v1/clusters?cluster_id=%s", OpenPitrixServer, clusterId) + + resp, err := makeHttpRequest("GET", url, "") + if err != nil { + glog.Error(err) + return nil, err + } + + var clusterList clusters + err = json.Unmarshal(resp, &clusterList) + + if err != nil { + glog.Error(err) + return nil, err + } + + if len(clusterList.Clusters) == 0 { + return nil, fmt.Errorf("NotFound, clusterId:%s", clusterId) + } + + item := clusterList.Clusters[0] + var app Application + + app.Name = item.Name + app.ClusterID = item.ClusterID + app.UpdateTime = item.UpdateTime + app.CreateTime = item.CreateTime + app.Status = item.Status + versionInfo, _ := GetVersion(item.VersionID) + app.Version = versionInfo + app.VersionId = item.VersionID + + runtimeInfo, _ := GetRuntime(item.RunTimeId) + app.Runtime = runtimeInfo + app.RuntimeId = item.RunTimeId + appInfo, repoId, appId, _ := GetAppInfo(item.AppID) + app.App = appInfo + app.AppId = appId + app.Description = item.Description + + app.RepoName, _ = GetRepo(repoId) + + workloads, err := GetWorkLoads(app.Runtime, item.ClusterRoleSets) + if err != nil { + glog.Error(err) + return nil, err + } + app.WorkLoads = workloads + workloadLabels := getLabels(app.Runtime, app.WorkLoads) + app.Services = getSvcs(app.Runtime, workloadLabels) + app.Ingresses = getIng(app.Runtime, app.Services) + + return &app, nil +} + +func makeHttpRequest(method, url, data string) ([]byte, error) { + var req *http.Request + + var err error + if method == "GET" { + req, err = http.NewRequest(method, url, nil) + } else { + req, err = http.NewRequest(method, url, strings.NewReader(data)) + } + + req.Header.Add("Authorization", OpenPitrixProxyToken) + + if err != nil { + glog.Error(err) + return nil, err + } + + httpClient := &http.Client{} + resp, err := httpClient.Do(req) + + if err != nil { + err := fmt.Errorf("Request to %s failed, method: %s, reason: %s ", url, method, err) + glog.Error(err) + return nil, err + } + + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if resp.StatusCode >= http.StatusBadRequest { + err = fmt.Errorf(string(body)) + } + return body, err +} diff --git a/pkg/models/iam/am.go b/pkg/models/iam/am.go index c131cdc10..977ad4918 100644 --- a/pkg/models/iam/am.go +++ b/pkg/models/iam/am.go @@ -21,6 +21,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/golang/glog" "io/ioutil" "kubesphere.io/kubesphere/pkg/informers" "kubesphere.io/kubesphere/pkg/simple/client/k8s" @@ -174,7 +175,7 @@ func GetRoles(username string, namespace string) ([]*v1.Role, error) { func GetClusterRoles(username string) ([]*v1.ClusterRole, error) { clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() - clusterRoleBindings, err := clusterRoleBindingLister.List(labels.Everything()) + clusterRoleBindings, err := clusterRoleBindingLister.List(labels.SelectorFromSet(labels.Set{"": ""})) if err != nil { return nil, err @@ -204,7 +205,7 @@ func 
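Every OpenPitrix call in the applications package above funnels through the single makeHttpRequest helper: build the request, attach the proxy token as the Authorization header, and treat any status of 400 or higher as an error carrying the response body. A standalone sketch of the same pattern, using only the standard library (the endpoint, token and path here are hypothetical placeholders):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// openPitrixGet mirrors the makeHttpRequest helper: one shared client,
// an Authorization header, and the raw body surfaced as the error text
// whenever the server answers with a 4xx/5xx status.
func openPitrixGet(server, token, path string) ([]byte, error) {
	req, err := http.NewRequest(http.MethodGet, strings.TrimSuffix(server, "/")+path, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Authorization", token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request to %s failed: %v", path, err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode >= http.StatusBadRequest {
		return nil, fmt.Errorf("%s", body)
	}
	return body, nil
}

func main() {
	// Hypothetical values for illustration only.
	body, err := openPitrixGet("http://openpitrix-api:9100", "example-token", "/v1/apps?app_id=app-demo")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(string(body))
}
```

One small divergence worth noting: the sketch checks the http.NewRequest error before touching the request, whereas the helper in the patch adds the Authorization header first and only then inspects err — harmless for well-formed URLs, but a nil-pointer risk if the URL ever fails to parse.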
GetClusterRoles(username string) ([]*v1.ClusterRole, error) { roles = append(roles, role) break } else if apierrors.IsNotFound(err) { - log.Println(err) + glog.Warningln(err) break } else { return nil, err @@ -572,8 +573,10 @@ func GetRoleSimpleRules(roles []*v1.Role, namespace string) (map[string][]models return rulesMapping, nil } +// func CreateClusterRoleBinding(username string, clusterRoleName string) error { clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() + _, err := clusterRoleLister.Get(clusterRoleName) if err != nil { diff --git a/pkg/models/iam/iam.go b/pkg/models/iam/iam.go index f771fd1ec..cac09c88b 100644 --- a/pkg/models/iam/iam.go +++ b/pkg/models/iam/iam.go @@ -18,19 +18,13 @@ package iam import ( - "encoding/json" "fmt" - "io/ioutil" - "net/http" - "strings" - "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/simple/client/ldap" "k8s.io/api/rbac/v1" "k8s.io/kubernetes/pkg/util/slice" - "kubesphere.io/kubesphere/pkg/constants" - kserr "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models" ) @@ -75,27 +69,18 @@ func GetUsers(names []string) ([]models.User, error) { return make([]models.User, 0), nil } - result, err := http.Get(fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/users?name=%s", constants.AccountAPIServer, strings.Join(names, ","))) + conn, err := ldap.Client() if err != nil { return nil, err } - defer result.Body.Close() - data, err := ioutil.ReadAll(result.Body) - - if err != nil { - return nil, err - } - - if result.StatusCode > 200 { - return nil, kserr.Parse(data) - } - - err = json.Unmarshal(data, &users) - - if err != nil { - return nil, err + for _, name := range names { + user, err := UserDetail(name, conn) + if err != nil { + return nil, err + } + users = append(users, *user) } return users, nil @@ -103,32 +88,19 @@ func GetUsers(names []string) ([]models.User, error) { func GetUser(name string) (*models.User, error) { - result, err := http.Get(fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/users/%s", constants.AccountAPIServer, name)) + conn, err := ldap.Client() if err != nil { return nil, err } - defer result.Body.Close() - data, err := ioutil.ReadAll(result.Body) + user, err := UserDetail(name, conn) if err != nil { return nil, err } - if result.StatusCode > 200 { - return nil, kserr.Parse(data) - } - - var user models.User - - err = json.Unmarshal(data, &user) - - if err != nil { - return nil, err - } - - return &user, nil + return user, nil } func GetUserNamespaces(username string, requiredRule v1.PolicyRule) (allNamespace bool, namespaces []string, err error) { diff --git a/pkg/models/iam/im.go b/pkg/models/iam/im.go index 82f797eaf..34934a7c7 100644 --- a/pkg/models/iam/im.go +++ b/pkg/models/iam/im.go @@ -24,7 +24,6 @@ import ( "kubesphere.io/kubesphere/pkg/informers" "kubesphere.io/kubesphere/pkg/simple/client/k8s" "kubesphere.io/kubesphere/pkg/simple/client/redis" - "os" "regexp" "strconv" "strings" @@ -42,27 +41,18 @@ import ( jwtutils "kubesphere.io/kubesphere/pkg/utils/jwt" ) -const ( - envAdminEmail = "ADMIN_EMAIL" - envAdminPWD = "ADMIN_PWD" -) - var ( - counter Counter - AdminEmail = "admin@kubesphere.io" - AdminPWD = "passw0rd" + counter Counter + adminEmail string + adminPassword string + tokenExpireTime time.Duration ) -func init() { - if env := os.Getenv(envAdminEmail); env != "" { - AdminEmail = env - } - if env := os.Getenv(envAdminPWD); env != "" { - AdminPWD = env - } -} +func Init(email, password string, t time.Duration) 
error { + adminEmail = email + adminPassword = password + tokenExpireTime = t -func DatabaseInit() error { conn, err := ldapclient.Client() if err != nil { @@ -96,13 +86,16 @@ func checkAndCreateDefaultGroup(conn ldap.Client) error { if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { err = createGroupsBaseDN(conn) + if err != nil { + return fmt.Errorf("GroupBaseDN %s create failed: %s\n", ldapclient.GroupSearchBase, err) + } } if err != nil { - return fmt.Errorf("GroupBaseDN %s not exist: %s\n", ldapclient.GroupSearchBase, err) + return fmt.Errorf("iam database init failed: %s\n", err) } - if len(groups.Entries) == 0 { + if groups == nil || len(groups.Entries) == 0 { _, err = CreateGroup(models.Group{Path: constants.SystemWorkspace, Name: constants.SystemWorkspace, Creator: constants.AdminUserName, Description: "system workspace"}) if err != nil { @@ -127,21 +120,24 @@ func checkAndCreateDefaultUser(conn ldap.Client) error { if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { err = createUserBaseDN(conn) - } - - if err != nil { - return fmt.Errorf("UserBaseDN %s not exist: %s\n", ldapclient.UserSearchBase, err) - } - - if len(users.Entries) == 0 { - err := CreateUser(models.User{Username: constants.AdminUserName, Email: AdminEmail, Password: AdminPWD, Description: "Administrator account that was always created by default."}) - if err != nil { - return fmt.Errorf("admin create failed: %s\n", err) + return fmt.Errorf("UserBaseDN %s create failed: %s\n", ldapclient.UserSearchBase, err) } } - counter = NewCounter(len(users.Entries)) + if err != nil { + return fmt.Errorf("iam database init failed: %s\n", err) + } + + if users == nil || len(users.Entries) == 0 { + counter = NewCounter(0) + err := CreateUser(models.User{Username: constants.AdminUserName, Email: adminEmail, Password: adminPassword, Description: "Administrator account that was always created by default."}) + if err != nil { + return fmt.Errorf("admin create failed: %s\n", err) + } + } else { + counter = NewCounter(len(users.Entries)) + } return nil } @@ -200,8 +196,6 @@ func Login(username string, password string, ip string) (string, error) { email := result.Entries[0].GetAttributeValue("mail") dn := result.Entries[0].DN - user := models.User{Username: uid, Email: email} - // bind as the user to verify their password err = conn.Bind(dn, password) @@ -209,25 +203,29 @@ func Login(username string, password string, ip string) (string, error) { return "", err } - if ip != "" { - redisClient := redis.Client() - redisClient.RPush(fmt.Sprintf("kubesphere:users:%s:login-log", uid), fmt.Sprintf("%s,%s", time.Now().UTC().Format("2006-01-02T15:04:05Z"), ip)) - redisClient.LTrim(fmt.Sprintf("kubesphere:users:%s:login-log", uid), -10, -1) - } - claims := jwt.MapClaims{} - claims["exp"] = time.Now().Add(time.Hour * 24).Unix() - claims["username"] = user.Username - claims["email"] = user.Email + claims["exp"] = time.Now().Add(tokenExpireTime).Unix() + claims["username"] = uid + claims["email"] = email token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) uToken, _ := token.SignedString(jwtutils.Secret) + loginLog(uid, ip) + return uToken, nil } +func loginLog(uid, ip string) { + if ip != "" { + redisClient := redis.Client() + redisClient.RPush(fmt.Sprintf("kubesphere:users:%s:login-log", uid), fmt.Sprintf("%s,%s", time.Now().UTC().Format("2006-01-02T15:04:05Z"), ip)) + redisClient.LTrim(fmt.Sprintf("kubesphere:users:%s:login-log", uid), -10, -1) + } +} + func UserList(limit int, offset int) (int, []models.User, error) { 
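The im.go change above replaces the ADMIN_EMAIL / ADMIN_PWD environment variables with an explicit Init(email, password, tokenExpireTime) call, and Login now stamps that configurable expiry into the JWT instead of a hard-coded 24 hours. A small sketch of just the signing step, using the vendored dgrijalva/jwt-go package (the secret and duration below are placeholders; the real values come from jwtutils.Secret and whatever duration the caller passes to Init):

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

// issueToken shows the shape of the token Login signs: exp is derived from
// the configurable expiry handed to Init rather than a fixed day.
func issueToken(username, email string, expire time.Duration, secret []byte) (string, error) {
	claims := jwt.MapClaims{
		"exp":      time.Now().Add(expire).Unix(),
		"username": username,
		"email":    email,
	}
	return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
}

func main() {
	// Hypothetical secret and expiry, for illustration only.
	token, err := issueToken("admin", "admin@kubesphere.io", 2*time.Hour, []byte("example-secret"))
	if err != nil {
		fmt.Println("sign failed:", err)
		return
	}
	fmt.Println(token)
}
```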
conn, err := ldapclient.Client() @@ -668,7 +666,7 @@ func CreateUser(user models.User) error { } if len(result.Entries) > 0 { - return errors.New("username or email already exists") + return ldap.NewError(ldap.LDAPResultEntryAlreadyExists, fmt.Errorf("username or email already exists")) } maxUid, err := getMaxUid(conn) diff --git a/pkg/models/metrics/metrics.go b/pkg/models/metrics/metrics.go index 17810bc01..4cb20577f 100644 --- a/pkg/models/metrics/metrics.go +++ b/pkg/models/metrics/metrics.go @@ -732,7 +732,7 @@ func MonitorAllWorkspacesStatistics() *FormatedLevelMetric { wg.Add(4) go func() { - orgNums, errOrg := workspaces.Count() + orgNums, errOrg := workspaces.WorkspaceCount() if errOrg != nil { glog.Errorln(errOrg.Error()) } diff --git a/pkg/models/quotas/quotas.go b/pkg/models/quotas/quotas.go index 70a55ac3e..256b25ddc 100644 --- a/pkg/models/quotas/quotas.go +++ b/pkg/models/quotas/quotas.go @@ -34,8 +34,6 @@ const ( daemonsetsKey = "count/daemonsets.apps" deploymentsKey = "count/deployments.apps" ingressKey = "count/ingresses.extensions" - rolesKey = "count/roles.rbac.authorization.k8s.io" - clusterRolesKey = "count/cluster-role" servicesKey = "count/services" statefulsetsKey = "count/statefulsets.apps" persistentvolumeclaimsKey = "persistentvolumeclaims" @@ -47,18 +45,26 @@ const ( var ( resourceMap = map[string]string{daemonsetsKey: resources.DaemonSets, deploymentsKey: resources.Deployments, - ingressKey: resources.Ingresses, rolesKey: resources.Roles, servicesKey: resources.Services, + ingressKey: resources.Ingresses, servicesKey: resources.Services, statefulsetsKey: resources.StatefulSets, persistentvolumeclaimsKey: resources.PersistentVolumeClaims, podsKey: resources.Pods, - namespaceKey: resources.Namespaces, storageClassesKey: resources.StorageClasses, clusterRolesKey: resources.ClusterRoles, + namespaceKey: resources.Namespaces, storageClassesKey: resources.StorageClasses, jobsKey: resources.Jobs, cronJobsKey: resources.CronJobs} ) func getUsage(namespace, resource string) (int, error) { - list, err := resources.ListNamespaceResource(namespace, resource, ¶ms.Conditions{}, "", false, -1, 0) + var result *models.PageableResponse + var err error + if resource == resources.Namespaces || resource == resources.StorageClasses { + result, err = resources.ListClusterResource(resource, ¶ms.Conditions{}, "", false, 1, 0) + } else { + result, err = resources.ListNamespaceResource(namespace, resource, ¶ms.Conditions{}, "", false, 1, 0) + } + if err != nil { return 0, err } - return list.TotalCount, nil + + return result.TotalCount, nil } func GetClusterQuotas() (*models.ResourceQuota, error) { diff --git a/pkg/models/resources/pods.go b/pkg/models/resources/pods.go index cce6b3bbc..2cf9ecf26 100644 --- a/pkg/models/resources/pods.go +++ b/pkg/models/resources/pods.go @@ -18,6 +18,7 @@ package resources import ( + v12 "k8s.io/api/apps/v1" "kubesphere.io/kubesphere/pkg/informers" "kubesphere.io/kubesphere/pkg/params" "sort" @@ -30,16 +31,149 @@ import ( type podSearcher struct { } +func podBelongTo(item *v1.Pod, kind string, name string) bool { + + if strings.EqualFold(kind, "Deployment") { + if podBelongToDeployment(item, name) { + return true + } + } else if strings.EqualFold(kind, "ReplicaSet") { + if podBelongToReplicaSet(item, name) { + return true + } + } else if strings.EqualFold(kind, "DaemonSet") { + if podBelongToDaemonSet(item, name) { + return true + } + } else if strings.EqualFold(kind, "StatefulSet") { + if podBelongToStatefulSet(item, name) { + return true + } + } 
else if strings.EqualFold(kind, "Job") { + if podBelongToJob(item, name) { + return true + } + } + return false +} + +func replicaSetBelongToDeployment(replicaSet *v12.ReplicaSet, name string) bool { + for _, owner := range replicaSet.OwnerReferences { + if owner.Kind == "Deployment" && owner.Name == name { + return true + } + } + return false +} + +func podBelongToDaemonSet(item *v1.Pod, name string) bool { + for _, owner := range item.OwnerReferences { + if owner.Kind == "DaemonSet" && owner.Name == name { + return true + } + } + return false +} + +func podBelongToJob(item *v1.Pod, name string) bool { + for _, owner := range item.OwnerReferences { + if owner.Kind == "Job" && owner.Name == name { + return true + } + } + return false +} + +func podBelongToReplicaSet(item *v1.Pod, name string) bool { + for _, owner := range item.OwnerReferences { + if owner.Kind == "ReplicaSet" && owner.Name == name { + return true + } + } + return false +} + +func podBelongToStatefulSet(item *v1.Pod, name string) bool { + replicas, err := informers.SharedInformerFactory().Apps().V1().ReplicaSets().Lister().ReplicaSets(item.Namespace).List(labels.Everything()) + if err != nil { + return false + } + for _, r := range replicas { + if replicaSetBelongToDeployment(r, name) { + return podBelongToReplicaSet(item, r.Name) + } + } + return false +} + +func podBelongToDeployment(item *v1.Pod, name string) bool { + replicas, err := informers.SharedInformerFactory().Apps().V1().ReplicaSets().Lister().ReplicaSets(item.Namespace).List(labels.Everything()) + if err != nil { + return false + } + for _, r := range replicas { + if replicaSetBelongToDeployment(r, name) { + return podBelongToReplicaSet(item, r.Name) + } + } + return false +} + +func podBindPVC(item *v1.Pod, pvcName string) bool { + for _, v := range item.Spec.Volumes { + if v.VolumeSource.PersistentVolumeClaim != nil && + v.VolumeSource.PersistentVolumeClaim.ClaimName == pvcName { + return true + } + } + return false +} + +func podBelongToService(item *v1.Pod, serviceName string) bool { + service, err := informers.SharedInformerFactory().Core().V1().Services().Lister().Services(item.Namespace).Get(serviceName) + if err != nil { + return false + } + for k, v := range service.Spec.Selector { + if item.Labels[k] != v { + return false + } + } + return true +} + // exactly Match func (*podSearcher) match(match map[string]string, item *v1.Pod) bool { for k, v := range match { switch k { + case "ownerKind": + fallthrough + case "ownerName": + kind := match["ownerKind"] + name := match["ownerName"] + if !podBelongTo(item, kind, name) { + return false + } + case "nodeName": + if item.Spec.NodeName != v { + return false + } + case "pvcName": + if !podBindPVC(item, v) { + return false + } + case "serviceName": + if !podBelongToService(item, v) { + return false + } case name: if item.Name != v && item.Labels[displayName] != v { return false } default: - return false + if item.Labels[k] != v { + return false + } } } return true diff --git a/pkg/models/resources/resources.go b/pkg/models/resources/resources.go index 615c56897..a0b6d4fa6 100644 --- a/pkg/models/resources/resources.go +++ b/pkg/models/resources/resources.go @@ -131,6 +131,8 @@ func ListClusterResource(resource string, conditions *params.Conditions, orderBy if searcher, ok := clusterResources[resource]; ok { result, err = searcher.search(conditions, orderBy, reverse) + } else if searcher, ok := namespacedResources[resource]; ok { + result, err = searcher.search("", conditions, orderBy, reverse) } else { 
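The ownership helpers added to pods.go above all reduce to one primitive: scan a Pod's OwnerReferences for an owner of the expected kind and name, with Deployments taking an extra hop through their ReplicaSets. A self-contained sketch of that primitive, built on the vendored Kubernetes API types (the pod below is hypothetical). Note, as a review observation, that podBelongToStatefulSet in the hunk above reuses the Deployment→ReplicaSet traversal rather than checking for a StatefulSet owner directly, which looks like a copy-paste oversight since StatefulSet pods are owned by the StatefulSet itself.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ownedBy reports whether an object's metadata lists an owner of the given
// kind and name — the same check podBelongToReplicaSet, podBelongToDaemonSet
// and podBelongToJob perform. Resolving a Deployment needs one more step,
// because its pods are owned by ReplicaSets that the Deployment owns in turn.
func ownedBy(meta metav1.ObjectMeta, kind, name string) bool {
	for _, owner := range meta.OwnerReferences {
		if owner.Kind == kind && owner.Name == name {
			return true
		}
	}
	return false
}

func main() {
	// Hypothetical pod, shaped like what the informer lister would return.
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "web-6d4cf56db6-abcde",
			Namespace: "default",
			OwnerReferences: []metav1.OwnerReference{
				{Kind: "ReplicaSet", Name: "web-6d4cf56db6"},
			},
		},
	}

	fmt.Println(ownedBy(pod.ObjectMeta, "ReplicaSet", "web-6d4cf56db6")) // true
	fmt.Println(ownedBy(pod.ObjectMeta, "DaemonSet", "node-exporter"))   // false
}
```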
return nil, fmt.Errorf("not support") } diff --git a/pkg/models/servicemesh/metrics/handlers.go b/pkg/models/servicemesh/metrics/handlers.go new file mode 100644 index 000000000..7792328e2 --- /dev/null +++ b/pkg/models/servicemesh/metrics/handlers.go @@ -0,0 +1,7 @@ +package metrics + +import "github.com/emicklei/go-restful" + +func GetAppMetrics(request *restful.Request, response *restful.Response) { + +} diff --git a/pkg/models/workspaces/workspaces.go b/pkg/models/workspaces/workspaces.go index 12666b1b9..2633af192 100644 --- a/pkg/models/workspaces/workspaces.go +++ b/pkg/models/workspaces/workspaces.go @@ -23,6 +23,7 @@ import ( "fmt" "io/ioutil" "kubesphere.io/kubesphere/pkg/simple/client/k8s" + "kubesphere.io/kubesphere/pkg/simple/client/ldap" "kubesphere.io/kubesphere/pkg/simple/client/mysql" "net/http" @@ -40,7 +41,6 @@ import ( "errors" "regexp" - "github.com/emicklei/go-restful" "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -56,7 +56,6 @@ import ( func UnBindDevopsProject(workspace string, devops string) error { db := mysql.Client() - defer db.Close() return db.Delete(&models.WorkspaceDPBinding{Workspace: workspace, DevOpsProject: devops}).Error } @@ -306,7 +305,6 @@ func Namespaces(workspaceName string) ([]*core.Namespace, error) { func BindingDevopsProject(workspace string, devops string) error { db := mysql.Client() - defer db.Close() return db.Create(&models.WorkspaceDPBinding{Workspace: workspace, DevOpsProject: devops}).Error } @@ -332,27 +330,11 @@ func Delete(workspace *models.Workspace) error { return err } - req, err := http.NewRequest(http.MethodDelete, fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/groups/%s", constants.AccountAPIServer, workspace.Name), nil) + err = iam.DeleteGroup(workspace.Name) if err != nil { return err } - result, err := http.DefaultClient.Do(req) - - if err != nil { - return err - } - - defer result.Body.Close() - data, err := ioutil.ReadAll(result.Body) - - if err != nil { - return err - } - - if result.StatusCode > 200 { - return kserr.Parse(data) - } return nil } @@ -401,34 +383,13 @@ func workspaceRoleRelease(workspace string) error { func Create(workspace *models.Workspace) (*models.Workspace, error) { - data, err := json.Marshal(workspace) - + group, err := iam.CreateGroup(workspace.Group) if err != nil { return nil, err } - result, err := http.Post(fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/groups", constants.AccountAPIServer), restful.MIME_JSON, bytes.NewReader(data)) - - if err != nil { - return nil, err - } - defer result.Body.Close() - data, err = ioutil.ReadAll(result.Body) - - if err != nil { - return nil, err - } - - if result.StatusCode > 200 { - return nil, kserr.Parse(data) - } - - var created models.Workspace - - err = json.Unmarshal(data, &created) - - if err != nil { - return nil, err + created := models.Workspace{ + Group: *group, } created.Members = make([]string, 0) @@ -446,78 +407,35 @@ func Create(workspace *models.Workspace) (*models.Workspace, error) { func Edit(workspace *models.Workspace) (*models.Workspace, error) { - data, err := json.Marshal(workspace) + group, err := iam.UpdateGroup(&workspace.Group) if err != nil { return nil, err } - req, err := http.NewRequest(http.MethodPut, fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/groups/%s", constants.AccountAPIServer, workspace.Name), bytes.NewReader(data)) - req.Header.Set("Content-Type", "application/json") + workspace.Group = *group - if err != nil { 
- return nil, err - } - - result, err := http.DefaultClient.Do(req) - - if err != nil { - return nil, err - } - - defer result.Body.Close() - data, err = ioutil.ReadAll(result.Body) - - if err != nil { - return nil, err - } - - if result.StatusCode > 200 { - return nil, kserr.Parse(data) - } - - var edited models.Workspace - - err = json.Unmarshal(data, &edited) - - if err != nil { - return nil, err - } - - return &edited, nil + return workspace, nil } func Detail(name string) (*models.Workspace, error) { - result, err := http.Get(fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/groups/%s", constants.AccountAPIServer, name)) - + conn, err := ldap.Client() if err != nil { return nil, err } - defer result.Body.Close() - data, err := ioutil.ReadAll(result.Body) + defer conn.Close() - if err != nil { - return nil, err - } - - if result.StatusCode > 200 { - return nil, kserr.Parse(data) - } - - var group models.Group - - err = json.Unmarshal(data, &group) + group, err := iam.GroupDetail(name, conn) if err != nil { return nil, err } db := mysql.Client() - defer db.Close() - workspace, err := convertGroupToWorkspace(db, group) + workspace, err := convertGroupToWorkspace(db, *group) if err != nil { return nil, err @@ -570,45 +488,33 @@ func ListWorkspaceByUser(username string, keyword string) ([]*models.Workspace, func fetch(names []string) ([]*models.Workspace, error) { - url := fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/groups", constants.AccountAPIServer) - - if names != nil { - if len(names) == 0 { - return make([]*models.Workspace, 0), nil - } else { - url = url + "?path=" + strings.Join(names, ",") + if names != nil && len(names) == 0 { + return make([]*models.Workspace, 0), nil + } + var groups []models.Group + var err error + if names == nil { + groups, err = iam.ChildList("") + if err != nil { + return nil, err + } + } else { + conn, err := ldap.Client() + if err != nil { + return nil, err + } + defer conn.Close() + for _, name := range names { + group, err := iam.GroupDetail(name, conn) + if err != nil { + return nil, err + } + groups = append(groups, *group) } } - result, err := http.Get(url) - - if err != nil { - return nil, err - } - - defer result.Body.Close() - data, err := ioutil.ReadAll(result.Body) - - if err != nil { - return nil, err - } - - if result.StatusCode > 200 { - return nil, kserr.Parse(data) - } - - var groups []models.Group - - err = json.Unmarshal(data, &groups) - - if err != nil { - return nil, err - } - db := mysql.Client() - defer db.Close() - workspaces := make([]*models.Workspace, 0) for _, group := range groups { workspace, err := convertGroupToWorkspace(db, group) @@ -624,7 +530,6 @@ func fetch(names []string) ([]*models.Workspace, error) { func ListDevopsProjectsByUser(username string, workspace string, keyword string, orderBy string, reverse bool, limit int, offset int) (int, []models.DevopsProject, error) { db := mysql.Client() - defer db.Close() var workspaceDOPBindings []models.WorkspaceDPBinding @@ -850,43 +755,6 @@ func Roles(workspace *models.Workspace) ([]*v1.ClusterRole, error) { return roles, nil } -func GetWorkspaceMembers(workspace string, keyword string) ([]models.User, error) { - - url := fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/workspaces/%s/members", constants.AccountAPIServer, workspace) - - if keyword != "" { - url = url + "?keyword=" + keyword - } - - result, err := http.Get(url) - - if err != nil { - return nil, err - } - - defer result.Body.Close() - data, err := ioutil.ReadAll(result.Body) - - 
if err != nil { - return nil, err - } - - if result.StatusCode > 200 { - return nil, kserr.Parse(data) - } - - var users []models.User - - err = json.Unmarshal(data, &users) - - if err != nil { - return nil, err - } - - return users, nil - -} - func WorkspaceRoleInit(workspace *models.Workspace) error { k8sClient := k8s.Client() @@ -1222,7 +1090,6 @@ func CreateWorkspaceRoleBinding(workspace *models.Workspace, username string, ro func GetDevOpsProjects(workspaceName string) ([]string, error) { db := mysql.Client() - defer db.Close() var workspaceDOPBindings []models.WorkspaceDPBinding @@ -1266,42 +1133,15 @@ func WorkspaceNamespaces(workspaceName string) ([]string, error) { return namespaces, nil } -func Count() (int, error) { +func WorkspaceCount() (int, error) { - result, err := http.Get(fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/groups/count", constants.AccountAPIServer)) + workspaces, err := iam.ChildList("") if err != nil { return 0, err } - defer result.Body.Close() - - data, err := ioutil.ReadAll(result.Body) - - if err != nil { - return 0, err - } - - if result.StatusCode > 200 { - return 0, kserr.Parse(data) - } - var count map[string]json.Number - - err = json.Unmarshal(data, &count) - - if err != nil { - return 0, err - } - - value := count["total_count"] - - v, err := value.Int64() - - if err != nil { - return 0, err - } - - return int(v), nil + return len(workspaces), nil } func GetAllProjectNums() (int, error) { @@ -1315,7 +1155,6 @@ func GetAllProjectNums() (int, error) { func GetAllDevOpsProjectsNums() (int, error) { db := mysql.Client() - defer db.Close() var count int if err := db.Model(&models.WorkspaceDPBinding{}).Count(&count).Error; err != nil { @@ -1325,35 +1164,11 @@ func GetAllDevOpsProjectsNums() (int, error) { } func GetAllAccountNums() (int, error) { - result, err := http.Get(fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/users", constants.AccountAPIServer)) + totalCount, _, err := iam.UserList(1, 0) if err != nil { return 0, err } - defer result.Body.Close() - data, err := ioutil.ReadAll(result.Body) - if err != nil { - return 0, err - } - if result.StatusCode > 200 { - return 0, kserr.Parse(data) - } - var count map[string]json.Number - - err = json.Unmarshal(data, &count) - - if err != nil { - return 0, err - } - - value := count["total_count"] - - v, err := value.Int64() - - if err != nil { - return 0, err - } - - return int(v), nil + return totalCount, nil } diff --git a/pkg/options/options.go b/pkg/options/options.go index 595a584ec..152b08a9c 100644 --- a/pkg/options/options.go +++ b/pkg/options/options.go @@ -17,11 +17,7 @@ */ package options -import ( - "github.com/spf13/pflag" -) - -var SharedOptions = NewServerRunOptions() +import "github.com/spf13/pflag" type ServerRunOptions struct { // server bind address @@ -38,8 +34,6 @@ type ServerRunOptions struct { // tls private key file TlsPrivateKey string - - CommandLine *pflag.FlagSet } func NewServerRunOptions() *ServerRunOptions { @@ -50,14 +44,16 @@ func NewServerRunOptions() *ServerRunOptions { SecurePort: 0, TlsCertFile: "", TlsPrivateKey: "", - CommandLine: &pflag.FlagSet{}, } - s.CommandLine.StringVar(&s.BindAddress, "bind-address", "0.0.0.0", "server bind address") - s.CommandLine.IntVar(&s.InsecurePort, "insecure-port", 9090, "insecure port number") - s.CommandLine.IntVar(&s.SecurePort, "secure-port", 0, "secure port number") - s.CommandLine.StringVar(&s.TlsCertFile, "tls-cert-file", "", "tls cert file") - s.CommandLine.StringVar(&s.TlsPrivateKey, "tls-private-key", 
"", "tls private key") - return &s } + +func (s *ServerRunOptions) AddFlags(fs *pflag.FlagSet) { + + fs.StringVar(&s.BindAddress, "bind-address", "0.0.0.0", "server bind address") + fs.IntVar(&s.InsecurePort, "insecure-port", 9090, "insecure port number") + fs.IntVar(&s.SecurePort, "secure-port", 0, "secure port number") + fs.StringVar(&s.TlsCertFile, "tls-cert-file", "", "tls cert file") + fs.StringVar(&s.TlsPrivateKey, "tls-private-key", "", "tls private key") +} diff --git a/pkg/params/params.go b/pkg/params/params.go index 80854989f..a8a1ecde2 100644 --- a/pkg/params/params.go +++ b/pkg/params/params.go @@ -23,8 +23,6 @@ import ( "regexp" "strconv" "strings" - - "golang.org/x/tools/container/intsets" ) const ( @@ -36,7 +34,7 @@ const ( func ParsePaging(req *restful.Request) (limit, offset int) { paging := req.QueryParameter(PagingParam) - limit = intsets.MaxInt + limit = 10 offset = 0 if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(paging); len(groups) == 3 { limit, _ = strconv.Atoi(groups[1]) diff --git a/pkg/simple/client/prometheus/prometheusclient.go b/pkg/simple/client/prometheus/prometheusclient.go index ab0cf030b..e4963c4a8 100644 --- a/pkg/simple/client/prometheus/prometheusclient.go +++ b/pkg/simple/client/prometheus/prometheusclient.go @@ -38,11 +38,11 @@ const ( ) var ( - prometheusAPIEndpoint string + PrometheusAPIEndpoint string ) func init() { - flag.StringVar(&prometheusAPIEndpoint, "prometheus-endpoint", "http://prometheus-k8s.kubesphere-monitoring-system.svc:9090/api/v1/", "prometheus api endpoint") + flag.StringVar(&PrometheusAPIEndpoint, "prometheus-endpoint", "http://prometheus-k8s.kubesphere-monitoring-system.svc:9090/api/v1/", "prometheus api endpoint") } type MonitoringRequestParams struct { @@ -71,7 +71,7 @@ type MonitoringRequestParams struct { } func SendMonitoringRequest(queryType string, params string) string { - epurl := prometheusAPIEndpoint + queryType + params + epurl := PrometheusAPIEndpoint + queryType + params response, err := http.DefaultClient.Get(epurl) if err != nil { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go new file mode 100644 index 000000000..3fe9c0865 --- /dev/null +++ b/pkg/utils/utils.go @@ -0,0 +1,7 @@ +package utils + +func CheckError(err error) { + if err != nil { + panic(err) + } +} diff --git a/tools/cmd/doc-gen/main.go b/tools/cmd/doc-gen/main.go index 65d5057c8..2c3be77ac 100644 --- a/tools/cmd/doc-gen/main.go +++ b/tools/cmd/doc-gen/main.go @@ -26,12 +26,15 @@ import ( "github.com/emicklei/go-restful-openapi" "github.com/go-openapi/spec" "io/ioutil" + _ "kubesphere.io/kubesphere/pkg/apis/iam/install" "kubesphere.io/kubesphere/pkg/apiserver/runtime" "log" // Install apis _ "kubesphere.io/kubesphere/pkg/apis/metrics/install" + _ "kubesphere.io/kubesphere/pkg/apis/monitoring/install" _ "kubesphere.io/kubesphere/pkg/apis/operations/install" _ "kubesphere.io/kubesphere/pkg/apis/resources/install" + _ "kubesphere.io/kubesphere/pkg/apis/servicemesh/metrics/install" ) var output string diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore deleted file mode 100644 index b883f1fdc..000000000 --- a/vendor/github.com/Microsoft/go-winio/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.exe diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md deleted file mode 100644 index 568001057..000000000 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# go-winio - -This 
repository contains utilities for efficiently performing Win32 IO operations in -Go. Currently, this is focused on accessing named pipes and other file handles, and -for using named pipes as a net transport. - -This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go -to reuse the thread to schedule another goroutine. This limits support to Windows Vista and -newer operating systems. This is similar to the implementation of network sockets in Go's net -package. - -Please see the LICENSE file for licensing information. - -This project has adopted the [Microsoft Open Source Code of -Conduct](https://opensource.microsoft.com/codeofconduct/). For more information -see the [Code of Conduct -FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact -[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional -questions or comments. - -Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe -for another named pipe implementation. diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore deleted file mode 100644 index 748e4c807..000000000 --- a/vendor/github.com/PuerkitoBio/purell/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.sublime-* -.DS_Store -*.swp -*.swo -tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml deleted file mode 100644 index cf31e6af6..000000000 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - "1.10.x" - - "1.11.x" - - tip diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md deleted file mode 100644 index 07de0c498..000000000 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ /dev/null @@ -1,188 +0,0 @@ -# Purell - -Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... - -Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. - -[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) - -## Install - -`go get github.com/PuerkitoBio/purell` - -## Changelog - -* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). -* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). -* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). -* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). -* **v0.2.0** : Add benchmarks, Attempt IDN support. -* **v0.1.0** : Initial release. 
- -## Examples - -From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): - -```go -package purell - -import ( - "fmt" - "net/url" -) - -func ExampleNormalizeURLString() { - if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", - FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { - panic(err) - } else { - fmt.Print(normalized) - } - // Output: http://somewebsite.com:80/Amazing%3F/url/ -} - -func ExampleMustNormalizeURLString() { - normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", - FlagsUnsafeGreedy) - fmt.Print(normalized) - - // Output: http://somewebsite.com/Amazing%FA/url -} - -func ExampleNormalizeURL() { - if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { - panic(err) - } else { - normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) - fmt.Print(normalized) - } - - // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 -} -``` - -## API - -As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: - -```go -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". - - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) -``` - -For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. - -The [full godoc reference is available on gopkgdoc][godoc]. 
- -Some things to note: - -* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. - -* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): - - %24 -> $ - - %26 -> & - - %2B-%3B -> +,-./0123456789:; - - %3D -> = - - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ - - %5F -> _ - - %61-%7A -> abcdefghijklmnopqrstuvwxyz - - %7E -> ~ - - -* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). - -* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. - -* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. - -### Safe vs Usually Safe vs Unsafe - -Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. - -Consider the following URL: - -`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -Normalizing with the `FlagsSafe` gives: - -`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -With the `FlagsUsuallySafeGreedy`: - -`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` - -And with `FlagsUnsafeGreedy`: - -`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` - -## TODOs - -* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. - -## Thanks / Contributions - -@rogpeppe -@jehiah -@opennota -@pchristopher1275 -@zenovich -@beeker1121 - -## License - -The [BSD 3-Clause license][bsd]. - -[bsd]: http://opensource.org/licenses/BSD-3-Clause -[wiki]: http://en.wikipedia.org/wiki/URL_normalization -[rfc]: http://tools.ietf.org/html/rfc3986#section-6 -[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell -[pr5]: https://github.com/PuerkitoBio/purell/pull/5 -[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index ba6b225f9..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -install: - - go build . 
- -script: - - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index 57aff0a53..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). - -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore deleted file mode 100644 index 6b7d7d1e8..000000000 --- a/vendor/github.com/Sirupsen/logrus/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -logrus -vendor diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index a8f154515..000000000 --- a/vendor/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,52 +0,0 @@ -language: go -go_import_path: github.com/sirupsen/logrus -env: - - GOMAXPROCS=4 GORACE=halt_on_error=1 -matrix: - include: - - go: 1.10.x - install: - - go get github.com/stretchr/testify/assert - - go get golang.org/x/crypto/ssh/terminal - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows - script: - - go test -race -v ./... - - go: 1.11.x - env: GO111MODULE=on - install: - - go mod download - script: - - go test -race -v ./... - - go: 1.11.x - env: GO111MODULE=off - install: - - go get github.com/stretchr/testify/assert - - go get golang.org/x/crypto/ssh/terminal - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows - script: - - go test -race -v ./... - - go: 1.10.x - install: - - go get github.com/stretchr/testify/assert - - go get golang.org/x/crypto/ssh/terminal - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows - script: - - go test -race -v -tags appengine ./... - - go: 1.11.x - env: GO111MODULE=on - install: - - go mod download - script: - - go test -race -v -tags appengine ./... - - go: 1.11.x - env: GO111MODULE=off - install: - - go get github.com/stretchr/testify/assert - - go get golang.org/x/crypto/ssh/terminal - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows - script: - - go test -race -v -tags appengine ./... diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index 9978b415a..000000000 --- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,190 +0,0 @@ -# 1.4.0 -This new release introduces: - * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). - * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter (#909, #911) - * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). - -Fixes: - * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). 
- * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) - * Fix infinite recursion on unknown `Level.String()` (#907) - * Fix race condition in `getCaller` (#916). - - -# 1.3.0 -This new release introduces: - * Log, Logf, Logln functions for Logger and Entry that take a Level - -Fixes: - * Building prometheus node_exporter on AIX (#840) - * Race condition in TextFormatter (#468) - * Travis CI import path (#868) - * Remove coloured output on Windows (#862) - * Pointer to func as field in JSONFormatter (#870) - * Properly marshal Levels (#873) - -# 1.2.0 -This new release introduces: - * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued - * A new trace level named `Trace` whose level is below `Debug` - * A configurable exit function to be called upon a Fatal trace - * The `Level` object now implements `encoding.TextUnmarshaler` interface - -# 1.1.1 -This is a bug fix release. - * fix the build break on Solaris - * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized - -# 1.1.0 -This new release introduces: - * several fixes: - * a fix for a race condition on entry formatting - * proper cleanup of previously used entries before putting them back in the pool - * the extra new line at the end of message in text formatter has been removed - * a new global public API to check if a level is activated: IsLevelEnabled - * the following methods have been added to the Logger object - * IsLevelEnabled - * SetFormatter - * SetOutput - * ReplaceHooks - * introduction of go module - * an indent configuration for the json formatter - * output colour support for windows - * the field sort function is now configurable for text formatter - * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater - -# 1.0.6 - -This new release introduces: - * a new api WithTime which allows to easily force the time of the log entry - which is mostly useful for logger wrapper - * a fix reverting the immutability of the entry given as parameter to the hooks - a new configuration field of the json formatter in order to put all the fields - in a nested dictionnary - * a new SetOutput method in the Logger - * a new configuration of the textformatter to configure the name of the default keys - * a new configuration of the text formatter to disable the level truncation - -# 1.0.5 - -* Fix hooks race (#707) -* Fix panic deadlock (#695) - -# 1.0.4 - -* Fix race when adding hooks (#612) -* Fix terminal check in AppEngine (#635) - -# 1.0.3 - -* Replace example files with testable examples - -# 1.0.2 - -* bug: quote non-string values in text formatter (#583) -* Make (*Logger) SetLevel a public method - -# 1.0.1 - -* bug: fix escaping in text formatter (#575) - -# 1.0.0 - -* Officially changed name to lower-case -* bug: colors on Windows 10 (#541) -* bug: fix race in accessing level (#512) - -# 0.11.5 - -* feature: add writer and writerlevel to entry (#372) - -# 0.11.4 - -* bug: fix undefined variable on solaris (#493) - -# 0.11.3 - -* formatter: configure quoting of empty values (#484) -* formatter: configure quoting character (default is `"`) (#484) -* bug: fix not importing io correctly in non-linux environments (#481) - -# 0.11.2 - -* bug: fix windows terminal detection (#476) - -# 0.11.1 - -* bug: fix tty detection with custom out (#471) - -# 0.11.0 - -* performance: Use bufferpool to allocate (#370) -* terminal: terminal detection for 
app-engine (#343) -* feature: exit handler (#375) - -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index a4796eb07..000000000 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,495 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. - -**Seeing weird case-sensitive problems?** It's in the past been possible to -import Logrus as both upper- and lower-case. Due to the Go package environment, -this caused issues in the community and we needed a standard. Some environments -experienced problems with the upper-case variant, so the lower-case was decided. -Everything using `logrus` will need to use the lower-case: -`github.com/sirupsen/logrus`. Any package that isn't, should be changed. - -To fix Glide, see [these -comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). -For an in-depth explanation of the casing issue, see [this -comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). - -**Are you interested in assisting in maintaining Logrus?** Currently I have a -lot of obligations, and I am unable to provide Logrus with the maintainership it -needs. If you'd like to help, please reach out to me at `simon at author's -username dot com`. 
- -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -``` -To ensure this behaviour even if a TTY is attached, set your formatter as follows: - -```go - log.SetFormatter(&log.TextFormatter{ - DisableColors: true, - FullTimestamp: true, - }) -``` - -#### Logging Method Name - -If you wish to add the calling method as a field, instruct the logger via: -```go -log.SetReportCaller(true) -``` -This adds the caller as 'method' like so: - -```json -{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", -"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} -``` - -```text -time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin -``` -Note that this does add measurable overhead - the cost will depend on the version of Go, but is -between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: -``` -go test -bench=.*CallerTracing -``` - - -#### Case-sensitivity - -The organization's name was changed to lower-case--and this will not be changed -back. If you are getting import conflicts due to case sensitivity, please use -the lower-case import: `github.com/sirupsen/logrus`. - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` -and you'll now have the flexibility of Logrus. 
You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stdout instead of the default stderr - // Can be any io.Writer, see below for File example - log.SetOutput(os.Stdout) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - "github.com/sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stdout - - // You could set this to any `io.Writer` such as a file - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) - // if err == nil { - // log.Out = file - // } else { - // log.Info("Failed to log to file, using default stderr") - // } - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Default Fields - -Often it's helpful to have fields _always_ attached to log statements in an -application or parts of one. For example, you may want to always log the -`request_id` and `user_ip` in the context of a request. 
Instead of writing -`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on -every line, you can create a `logrus.Entry` to pass around instead: - -```go -requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) -requestLogger.Info("something happened on that request") # will log request_id and user_ip -requestLogger.Warn("something not great happened") -``` - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). - -A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) - - -#### Level logging - -Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Trace("Something very low level.") -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. 
For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true`. For Windows, see - [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). - * When colors are enabled, levels are truncated to 4 characters by default. To disable - truncation set the `DisableLevelTruncation` field to `true`. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). -* `logrus.JSONFormatter`. Logs fields as JSON. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). - -Third party logging formatters: - -* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. -* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). -* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. -* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. 
- ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -This means that we can override the standard library logger easily: - -```go -logger := logrus.New() -logger.Formatter = &logrus.JSONFormatter{} - -// Use logrus for standard log output -// Note that `log` here references stdlib's log -// Not logrus imported under the name `log`. -log.SetOutput(logger.Writer()) -``` - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| -|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | - -#### Testing - -Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: - -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook -* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): - -```go -import( - "github.com/sirupsen/logrus" - "github.com/sirupsen/logrus/hooks/test" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestSomething(t*testing.T){ - logger, hook := test.NewNullLogger() - logger.Error("Helloerror") - - assert.Equal(t, 1, len(hook.Entries)) - assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) - assert.Equal(t, "Helloerror", hook.LastEntry().Message) - - hook.Reset() - assert.Nil(t, hook.LastEntry()) -} -``` - -#### Fatal handlers - -Logrus can register one or more functions that will be called when any `fatal` -level message is logged. The registered handlers will be executed before -logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need -to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. - -``` -... -handler := func() { - // gracefully shutdown something... -} -logrus.RegisterExitHandler(handler) -... -``` - -#### Thread safety - -By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. -If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. - -Situation when locking is not needed includes: - -* You have no hooks registered, or hooks calling is already thread-safe. - -* Writing to logger.Out is already thread-safe, for example: - - 1) logger.Out is protected by locks. - - 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. 
(This allow multi-thread/multi-process writing) - - (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/Sirupsen/logrus/appveyor.yml b/vendor/github.com/Sirupsen/logrus/appveyor.yml deleted file mode 100644 index b4ffca275..000000000 --- a/vendor/github.com/Sirupsen/logrus/appveyor.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: "{build}" -platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath -branches: - only: - - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version -build_script: - - go get -t - - go test diff --git a/vendor/github.com/Sirupsen/logrus/go.mod b/vendor/github.com/Sirupsen/logrus/go.mod deleted file mode 100644 index 94574cc63..000000000 --- a/vendor/github.com/Sirupsen/logrus/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/sirupsen/logrus - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/konsorten/go-windows-terminal-sequences v1.0.1 - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.1.1 // indirect - github.com/stretchr/testify v1.2.2 - golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 - golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 -) diff --git a/vendor/github.com/Sirupsen/logrus/go.sum b/vendor/github.com/Sirupsen/logrus/go.sum deleted file mode 100644 index 133d34ae1..000000000 --- a/vendor/github.com/Sirupsen/logrus/go.sum +++ /dev/null @@ -1,15 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/aead/chacha20/.gitignore b/vendor/github.com/aead/chacha20/.gitignore deleted file mode 100644 index 9d3d84373..000000000 --- a/vendor/github.com/aead/chacha20/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -.vscode - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff 
--git a/vendor/github.com/aead/chacha20/.travis.yml b/vendor/github.com/aead/chacha20/.travis.yml deleted file mode 100644 index 99199e094..000000000 --- a/vendor/github.com/aead/chacha20/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -language: go - -go: - - "1.8.x" - - "1.9.x" - - "1.10.x" - -env: - - TRAVIS_GOARCH=amd64 - - TRAVIS_GOARCH=386 - -before_install: -- export GOARCH=$TRAVIS_GOARCH - -branches: - only: - - master - -before_script: -- go get -u github.com/klauspost/asmfmt/cmd/asmfmt - -script: -- diff -au <(gofmt -d .) <(printf "") -- diff -au <(asmfmt -d .) <(printf "") -- go test -v ./... diff --git a/vendor/github.com/aead/chacha20/README.md b/vendor/github.com/aead/chacha20/README.md deleted file mode 100644 index b36942492..000000000 --- a/vendor/github.com/aead/chacha20/README.md +++ /dev/null @@ -1,82 +0,0 @@ -[![Godoc Reference](https://godoc.org/github.com/aead/chacha20?status.svg)](https://godoc.org/github.com/aead/chacha20) -[![Build Status](https://travis-ci.org/aead/chacha20.svg?branch=master)](https://travis-ci.org/aead/chacha20) -[![Go Report Card](https://goreportcard.com/badge/aead/chacha20)](https://goreportcard.com/report/aead/chacha20) - -## The ChaCha20 stream cipher - -ChaCha is a stream cipher family created by Daniel J. Bernstein. -The most common ChaCha variant is ChaCha20 (20 rounds). ChaCha20 is -standardized in [RFC 7539](https://tools.ietf.org/html/rfc7539 "RFC 7539"). - -This package provides implementations of three ChaCha versions: -- ChaCha20 with a 64 bit nonce (can en/decrypt up to 2^64 * 64 bytes for one key-nonce combination) -- ChaCha20 with a 96 bit nonce (can en/decrypt up to 2^32 * 64 bytes ~ 256 GB for one key-nonce combination) -- XChaCha20 with a 192 bit nonce (can en/decrypt up to 2^64 * 64 bytes for one key-nonce combination) - -Furthermore the chacha sub package implements ChaCha20/12 and ChaCha20/8. -These versions use 12 or 8 rounds instead of 20. -But it's recommended to use ChaCha20 (with 20 rounds) - it will be fast enough for almost all purposes. - -### Installation -Install in your GOPATH: `go get -u github.com/aead/chacha20` - -### Requirements -All go versions >= 1.8.7 are supported. -The code may also work on Go 1.7 but this is not tested. 
- -### Performance - -#### AMD64 -Hardware: Intel i7-6500U 2.50GHz x 2 -System: Linux Ubuntu 16.04 - kernel: 4.4.0-62-generic -Go version: 1.8.0 -``` -AVX2 -name speed cpb -ChaCha20_64-4 573MB/s ± 0% 4.16 -ChaCha20_1K-4 2.19GB/s ± 0% 1.06 -XChaCha20_64-4 261MB/s ± 0% 9.13 -XChaCha20_1K-4 1.69GB/s ± 4% 1.37 -XORKeyStream64-4 474MB/s ± 2% 5.02 -XORKeyStream1K-4 2.09GB/s ± 1% 1.11 -XChaCha20_XORKeyStream64-4 262MB/s ± 0% 9.09 -XChaCha20_XORKeyStream1K-4 1.71GB/s ± 1% 1.36 - -SSSE3 -name speed cpb -ChaCha20_64-4 583MB/s ± 0% 4.08 -ChaCha20_1K-4 1.15GB/s ± 1% 2.02 -XChaCha20_64-4 267MB/s ± 0% 8.92 -XChaCha20_1K-4 984MB/s ± 5% 2.42 -XORKeyStream64-4 492MB/s ± 1% 4.84 -XORKeyStream1K-4 1.10GB/s ± 5% 2.11 -XChaCha20_XORKeyStream64-4 266MB/s ± 0% 8.96 -XChaCha20_XORKeyStream1K-4 1.00GB/s ± 2% 2.32 -``` -#### 386 -Hardware: Intel i7-6500U 2.50GHz x 2 -System: Linux Ubuntu 16.04 - kernel: 4.4.0-62-generic -Go version: 1.8.0 -``` -SSSE3 -name                        speed cpb -ChaCha20_64-4               570MB/s ± 0% 4.18 -ChaCha20_1K-4               650MB/s ± 0% 3.66 -XChaCha20_64-4              223MB/s ± 0% 10.69 -XChaCha20_1K-4              584MB/s ± 1% 4.08 -XORKeyStream64-4            392MB/s ± 1% 6.08 -XORKeyStream1K-4            629MB/s ± 1% 3.79 -XChaCha20_XORKeyStream64-4  222MB/s ± 0% 10.73 -XChaCha20_XORKeyStream1K-4  585MB/s ± 0% 4.07 - -SSE2 -name speed cpb -ChaCha20_64-4 509MB/s ± 0% 4.68 -ChaCha20_1K-4 553MB/s ± 2% 4.31 -XChaCha20_64-4 201MB/s ± 0% 11.86 -XChaCha20_1K-4 498MB/s ± 4% 4.78 -XORKeyStream64-4 359MB/s ± 1% 6.64 -XORKeyStream1K-4 545MB/s ± 0% 4.37 -XChaCha20_XORKeyStream64-4 201MB/s ± 1% 11.86 -XChaCha20_XORKeyStream1K-4 507MB/s ± 0% 4.70 -``` diff --git a/vendor/github.com/mattbaird/jsonpatch/LICENSE b/vendor/github.com/appscode/jsonpatch/LICENSE similarity index 100% rename from vendor/github.com/mattbaird/jsonpatch/LICENSE rename to vendor/github.com/appscode/jsonpatch/LICENSE diff --git a/vendor/github.com/mattbaird/jsonpatch/jsonpatch.go b/vendor/github.com/appscode/jsonpatch/jsonpatch.go similarity index 52% rename from vendor/github.com/mattbaird/jsonpatch/jsonpatch.go rename to vendor/github.com/appscode/jsonpatch/jsonpatch.go index 295f260f5..3e698949e 100644 --- a/vendor/github.com/mattbaird/jsonpatch/jsonpatch.go +++ b/vendor/github.com/appscode/jsonpatch/jsonpatch.go @@ -8,20 +8,22 @@ import ( "strings" ) -var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONDoc = fmt.Errorf("invalid JSON Document") -type JsonPatchOperation struct { +type JsonPatchOperation = Operation + +type Operation struct { Operation string `json:"op"` Path string `json:"path"` Value interface{} `json:"value,omitempty"` } -func (j *JsonPatchOperation) Json() string { +func (j *Operation) Json() string { b, _ := json.Marshal(j) return string(b) } -func (j *JsonPatchOperation) MarshalJSON() ([]byte, error) { +func (j *Operation) MarshalJSON() ([]byte, error) { var b bytes.Buffer b.WriteString("{") b.WriteString(fmt.Sprintf(`"op":"%s"`, j.Operation)) @@ -39,14 +41,14 @@ func (j *JsonPatchOperation) MarshalJSON() ([]byte, error) { return b.Bytes(), nil } -type ByPath []JsonPatchOperation +type ByPath []Operation func (a ByPath) Len() int { return len(a) } func (a ByPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByPath) Less(i, j int) bool { return a[i].Path < a[j].Path } -func NewPatch(operation, path string, value interface{}) JsonPatchOperation { - return JsonPatchOperation{Operation: operation, Path: path, Value: value} +func NewPatch(operation, path string, value 
interface{}) Operation { + return Operation{Operation: operation, Path: path, Value: value} } // CreatePatch creates a patch as specified in http://jsonpatch.com/ @@ -55,7 +57,7 @@ func NewPatch(operation, path string, value interface{}) JsonPatchOperation { // The function will return an array of JsonPatchOperations // // An error will be returned if any of the two documents are invalid. -func CreatePatch(a, b []byte) ([]JsonPatchOperation, error) { +func CreatePatch(a, b []byte) ([]Operation, error) { aI := map[string]interface{}{} bI := map[string]interface{}{} err := json.Unmarshal(a, &aI) @@ -66,7 +68,7 @@ func CreatePatch(a, b []byte) ([]JsonPatchOperation, error) { if err != nil { return nil, errBadJSONDoc } - return diff(aI, bI, "", []JsonPatchOperation{}) + return diff(aI, bI, "", []Operation{}) } // Returns true if the values matches (must be json types) @@ -78,22 +80,25 @@ func matchesValue(av, bv interface{}) bool { } switch at := av.(type) { case string: - bt := bv.(string) - if bt == at { + bt, ok := bv.(string) + if ok && bt == at { return true } case float64: - bt := bv.(float64) - if bt == at { + bt, ok := bv.(float64) + if ok && bt == at { return true } case bool: - bt := bv.(bool) - if bt == at { + bt, ok := bv.(bool) + if ok && bt == at { return true } case map[string]interface{}: - bt := bv.(map[string]interface{}) + bt, ok := bv.(map[string]interface{}) + if !ok { + return false + } for key := range at { if !matchesValue(at[key], bt[key]) { return false @@ -106,7 +111,10 @@ func matchesValue(av, bv interface{}) bool { } return true case []interface{}: - bt := bv.([]interface{}) + bt, ok := bv.([]interface{}) + if !ok { + return false + } if len(bt) != len(at) { return false } @@ -148,7 +156,7 @@ func makePath(path string, newPart interface{}) string { } // diff returns the (recursive) difference between a and b as an array of JsonPatchOperations. 
-func diff(a, b map[string]interface{}, path string, patch []JsonPatchOperation) ([]JsonPatchOperation, error) { +func diff(a, b map[string]interface{}, path string, patch []Operation) ([]Operation, error) { for key, bv := range b { p := makePath(path, key) av, ok := a[key] @@ -157,11 +165,6 @@ func diff(a, b map[string]interface{}, path string, patch []JsonPatchOperation) patch = append(patch, NewPatch("add", p, bv)) continue } - // If types have changed, replace completely - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - patch = append(patch, NewPatch("replace", p, bv)) - continue - } // Types are the same, compare values var err error patch, err = handleValues(av, bv, p, patch) @@ -181,7 +184,21 @@ func diff(a, b map[string]interface{}, path string, patch []JsonPatchOperation) return patch, nil } -func handleValues(av, bv interface{}, p string, patch []JsonPatchOperation) ([]JsonPatchOperation, error) { +func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, error) { + { + at := reflect.TypeOf(av) + bt := reflect.TypeOf(bv) + if at == nil && bt == nil { + // do nothing + return patch, nil + } else if at == nil && bt != nil { + return append(patch, NewPatch("add", p, bv)), nil + } else if at != bt { + // If types have changed, replace completely (preserves null in destination) + return append(patch, NewPatch("replace", p, bv)), nil + } + } + var err error switch at := av.(type) { case map[string]interface{}: @@ -195,63 +212,125 @@ func handleValues(av, bv interface{}, p string, patch []JsonPatchOperation) ([]J patch = append(patch, NewPatch("replace", p, bv)) } case []interface{}: - bt, ok := bv.([]interface{}) - if !ok { - // array replaced by non-array - patch = append(patch, NewPatch("replace", p, bv)) - } else if len(at) != len(bt) { - // arrays are not the same length - patch = append(patch, compareArray(at, bt, p)...) - + bt := bv.([]interface{}) + if isSimpleArray(at) && isSimpleArray(bt) { + patch = append(patch, compareEditDistance(at, bt, p)...) } else { - for i := range bt { + n := min(len(at), len(bt)) + for i := len(at) - 1; i >= n; i-- { + patch = append(patch, NewPatch("remove", makePath(p, i), nil)) + } + for i := n; i < len(bt); i++ { + patch = append(patch, NewPatch("add", makePath(p, i), bt[i])) + } + for i := 0; i < n; i++ { + var err error patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) if err != nil { return nil, err } } } - case nil: - switch bv.(type) { - case nil: - // Both nil, fine. 
- default: - patch = append(patch, NewPatch("add", p, bv)) - } default: panic(fmt.Sprintf("Unknown type:%T ", av)) } return patch, nil } -func compareArray(av, bv []interface{}, p string) []JsonPatchOperation { - retval := []JsonPatchOperation{} - // var err error - for i, v := range av { - found := false - for _, v2 := range bv { - if reflect.DeepEqual(v, v2) { - found = true - break - } - } - if !found { - retval = append(retval, NewPatch("remove", makePath(p, i), nil)) - } +func isBasicType(a interface{}) bool { + switch a.(type) { + case string, float64, bool: + default: + return false } - - for i, v := range bv { - found := false - for _, v2 := range av { - if reflect.DeepEqual(v, v2) { - found = true - break - } - } - if !found { - retval = append(retval, NewPatch("add", makePath(p, i), v)) - } - } - - return retval + return true +} + +func isSimpleArray(a []interface{}) bool { + for i := range a { + switch a[i].(type) { + case string, float64, bool: + default: + val := reflect.ValueOf(a[i]) + if val.Kind() == reflect.Map { + for _, k := range val.MapKeys() { + av := val.MapIndex(k) + if av.Kind() == reflect.Ptr || av.Kind() == reflect.Interface { + if av.IsNil() { + continue + } + av = av.Elem() + } + if av.Kind() != reflect.String && av.Kind() != reflect.Float64 && av.Kind() != reflect.Bool { + return false + } + } + return true + } + return false + } + } + return true +} + +// https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm +// Adapted from https://github.com/texttheater/golang-levenshtein +func compareEditDistance(s, t []interface{}, p string) []Operation { + m := len(s) + n := len(t) + + d := make([][]int, m+1) + for i := 0; i <= m; i++ { + d[i] = make([]int, n+1) + d[i][0] = i + } + for j := 0; j <= n; j++ { + d[0][j] = j + } + + for j := 1; j <= n; j++ { + for i := 1; i <= m; i++ { + if reflect.DeepEqual(s[i-1], t[j-1]) { + d[i][j] = d[i-1][j-1] // no op required + } else { + del := d[i-1][j] + 1 + add := d[i][j-1] + 1 + rep := d[i-1][j-1] + 1 + d[i][j] = min(rep, min(add, del)) + } + } + } + + return backtrace(s, t, p, m, n, d) +} + +func min(x int, y int) int { + if y < x { + return y + } + return x +} + +func backtrace(s, t []interface{}, p string, i int, j int, matrix [][]int) []Operation { + if i > 0 && matrix[i-1][j]+1 == matrix[i][j] { + op := NewPatch("remove", makePath(p, i-1), nil) + return append([]Operation{op}, backtrace(s, t, p, i-1, j, matrix)...) + } + if j > 0 && matrix[i][j-1]+1 == matrix[i][j] { + op := NewPatch("add", makePath(p, i), t[j-1]) + return append([]Operation{op}, backtrace(s, t, p, i, j-1, matrix)...) + } + if i > 0 && j > 0 && matrix[i-1][j-1]+1 == matrix[i][j] { + if isBasicType(s[0]) { + op := NewPatch("replace", makePath(p, i-1), t[j-1]) + return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...) + } + + p2, _ := handleValues(s[j-1], t[j-1], makePath(p, i-1), []Operation{}) + return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...) 
+ } + if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] { + return backtrace(s, t, p, i-1, j-1, matrix) + } + return []Operation{} } diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7..000000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 
-8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 
-3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/vendor/github.com/bifurcation/mint/README.md b/vendor/github.com/bifurcation/mint/README.md deleted file mode 100644 index 9fa05dddd..000000000 --- a/vendor/github.com/bifurcation/mint/README.md +++ /dev/null @@ -1,94 +0,0 @@ -![A lock with a mint leaf](https://ipv.sx/mint/mint.svg) - -mint - A Minimal TLS 1.3 stack -============================== - -[![Build Status](https://circleci.com/gh/bifurcation/mint.svg)](https://circleci.com/gh/bifurcation/mint) - -This project is primarily a learning effort for me to understand the [TLS -1.3](http://tlswg.github.io/tls13-spec/) protocol. The goal is to arrive at a -pretty complete implementation of TLS 1.3, with minimal, elegant code that -demonstrates how things work. Testing is a priority to ensure correctness, but -otherwise, the quality of the software engineering might not be at a level where -it makes sense to integrate this with other libraries. Backward compatibility -is not an objective. - -We borrow liberally from the [Go TLS -library](https://golang.org/pkg/crypto/tls/), especially where TLS 1.3 aligns -with earlier TLS versions. However, unnecessary parts will be ruthlessly cut -off. - -## DTLS Support - -Mint has partial support for DTLS, but that support is not yet complete -and may still contain serious defects. - - -## Quickstart - -Installation is the same as for any other Go package: - -``` -go get github.com/bifurcation/mint -``` - -The API is pretty much the same as for the TLS module, with `Dial` and `Listen` -methods wrapping the underlying socket APIs. - -``` -conn, err := mint.Dial("tcp", "localhost:4430", &mint.Config{...}) -... -listener, err := mint.Listen("tcp", "localhost:4430", &mint.Config{...}) -``` - -Documentation is available on -[godoc.org](https://godoc.org/github.com/bifurcation/mint) - - -## Interoperability testing - -The `mint-client` and `mint-server` executables are included to make it easy to -do basic interoperability tests with other TLS 1.3 implementations. The steps -for testing against NSS are as follows. 
- -``` -# Install mint -go get github.com/bifurcation/mint - -# Environment for NSS (you'll probably want a new directory) -NSS_ROOT= -mkdir $NSS_ROOT -cd $NSS_ROOT -export USE_64=1 -export ENABLE_TLS_1_3=1 -export HOST=localhost -export DOMSUF=localhost - -# Build NSS -hg clone https://hg.mozilla.org/projects/nss -hg clone https://hg.mozilla.org/projects/nspr -cd nss -make nss_build_all - -export PLATFORM=`cat $NSS_ROOT/dist/latest` -export DYLD_LIBRARY_PATH=$NSS_ROOT/dist/$PLATFORM/lib -export LD_LIBRARY_PATH=$NSS_ROOT/dist/$PLATFORM/lib - -# Run NSS tests (this creates data for the server to use) -cd tests/ssl_gtests -./ssl_gtests.sh - -# Test with client=mint server=NSS -cd $NSS_ROOT -./dist/$PLATFORM/bin/selfserv -d tests_results/security/$HOST.1/ssl_gtests/ -n rsa -p 4430 -# if you get `NSS_Init failed.`, check the path above, particularly around $HOST -# ... -go run $GOPATH/src/github.com/bifurcation/mint/bin/mint-client/main.go - -# Test with client=NSS server=mint -go run $GOPATH/src/github.com/bifurcation/mint/bin/mint-server/main.go -# ... -cd $NSS_ROOT -dist/$PLATFORM/bin/tstclnt -d tests_results/security/$HOST/ssl_gtests/ -V tls1.3:tls1.3 -h 127.0.0.1 -p 4430 -o -``` - diff --git a/vendor/github.com/bifurcation/mint/client-state-machine.go b/vendor/github.com/bifurcation/mint/client-state-machine.go index 07e7f53f9..d8464a147 100644 --- a/vendor/github.com/bifurcation/mint/client-state-machine.go +++ b/vendor/github.com/bifurcation/mint/client-state-machine.go @@ -89,7 +89,7 @@ func (state clientStateStart) Next(hr handshakeMessageReader) (HandshakeState, [ logf(logTypeHandshake, "opts: %+v", state.Opts) // supported_versions, supported_groups, signature_algorithms, server_name - sv := SupportedVersionsExtension{HandshakeType: HandshakeTypeClientHello, Versions: []uint16{supportedVersion}} + sv := SupportedVersionsExtension{HandshakeType: HandshakeTypeClientHello, Versions: []uint16{tls13Version}} sni := ServerNameExtension(state.Opts.ServerName) sg := SupportedGroupsExtension{Groups: state.Config.Groups} sa := SignatureAlgorithmsExtension{Algorithms: state.Config.SignatureSchemes} @@ -136,6 +136,15 @@ func (state clientStateStart) Next(hr handshakeMessageReader) (HandshakeState, [ } } + if len(state.Config.PSKModes) != 0 { + kem := &PSKKeyExchangeModesExtension{KEModes: state.Config.PSKModes} + err = ch.Extensions.Add(kem) + if err != nil { + logf(logTypeHandshake, "Error adding PSKKeyExchangeModes extension: %v", err) + return nil, nil, AlertInternalError + } + } + // Run the external extension handler. 
if state.Config.ExtensionHandler != nil { err := state.Config.ExtensionHandler.Send(HandshakeTypeClientHello, &ch.Extensions) @@ -152,7 +161,7 @@ func (state clientStateStart) Next(hr handshakeMessageReader) (HandshakeState, [ var offeredPSK PreSharedKey var earlyHash crypto.Hash var earlySecret []byte - var clientEarlyTrafficKeys keySet + var clientEarlyTrafficKeys KeySet var clientHello *HandshakeMessage if key, ok := state.Config.PSKs.Get(state.Opts.ServerName); ok { offeredPSK = key @@ -190,12 +199,6 @@ func (state clientStateStart) Next(hr handshakeMessageReader) (HandshakeState, [ logf(logTypeHandshake, "PSK selected, but no PSKModes") return nil, nil, AlertInternalError } - kem := &PSKKeyExchangeModesExtension{KEModes: state.Config.PSKModes} - err = ch.Extensions.Add(kem) - if err != nil { - logf(logTypeHandshake, "Error adding PSKKeyExchangeModes extension: %v", err) - return nil, nil, AlertInternalError - } // Add the shim PSK extension to the ClientHello logf(logTypeHandshake, "Adding PSK extension with id = %x", key.Identity) @@ -354,7 +357,7 @@ func (state clientStateWaitSH) Next(hr handshakeMessageReader) (HandshakeState, logf(logTypeHandshake, "[ClientStateWaitSH] no supported_versions extension") return nil, nil, AlertMissingExtension } - if supportedVersions.Versions[0] != supportedVersion { + if supportedVersions.Versions[0] != tls13Version { logf(logTypeHandshake, "[ClientStateWaitSH] unsupported version [%x]", supportedVersions.Versions[0]) return nil, nil, AlertProtocolVersion } @@ -416,6 +419,7 @@ func (state clientStateWaitSH) Next(hr handshakeMessageReader) (HandshakeState, // mode. In DTLS, we also need to bump the sequence number. // This is a pre-existing defect in Mint. Issue #175. logf(logTypeHandshake, "[ClientStateWaitSH] -> [ClientStateStart]") + state.hsCtx.SetVersion(tls12Version) // Everything after this should be 1.2. 
return clientStateStart{ Config: state.Config, Opts: state.Opts, diff --git a/vendor/github.com/bifurcation/mint/common.go b/vendor/github.com/bifurcation/mint/common.go index 05af3e95b..4d6477aa9 100644 --- a/vendor/github.com/bifurcation/mint/common.go +++ b/vendor/github.com/bifurcation/mint/common.go @@ -6,7 +6,7 @@ import ( ) const ( - supportedVersion uint16 = 0x7f16 // draft-22 + tls13Version uint16 = 0x0304 tls12Version uint16 = 0x0303 tls10Version uint16 = 0x0301 dtls12WireVersion uint16 = 0xfefd @@ -118,7 +118,7 @@ const ( ExtensionTypeSupportedGroups ExtensionType = 10 ExtensionTypeSignatureAlgorithms ExtensionType = 13 ExtensionTypeALPN ExtensionType = 16 - ExtensionTypeKeyShare ExtensionType = 40 + ExtensionTypeKeyShare ExtensionType = 51 ExtensionTypePreSharedKey ExtensionType = 41 ExtensionTypeEarlyData ExtensionType = 42 ExtensionTypeSupportedVersions ExtensionType = 43 diff --git a/vendor/github.com/bifurcation/mint/conn.go b/vendor/github.com/bifurcation/mint/conn.go index 12a99171e..ccf8dc112 100644 --- a/vendor/github.com/bifurcation/mint/conn.go +++ b/vendor/github.com/bifurcation/mint/conn.go @@ -129,6 +129,8 @@ type Config struct { NonBlocking bool UseDTLS bool + RecordLayer RecordLayerFactory + // The same config object can be shared among different connections, so it // needs its own mutex mutex sync.RWMutex @@ -270,28 +272,33 @@ type Conn struct { handshakeComplete bool readBuffer []byte - in, out *RecordLayer + in, out RecordLayer hsCtx *HandshakeContext } func NewConn(conn net.Conn, config *Config, isClient bool) *Conn { c := &Conn{conn: conn, config: config, isClient: isClient, hsCtx: &HandshakeContext{}} if !config.UseDTLS { - c.in = NewRecordLayerTLS(c.conn, directionRead) - c.out = NewRecordLayerTLS(c.conn, directionWrite) + if config.RecordLayer == nil { + c.in = NewRecordLayerTLS(c.conn, DirectionRead) + c.out = NewRecordLayerTLS(c.conn, DirectionWrite) + } else { + c.in = config.RecordLayer.NewLayer(c.conn, DirectionRead) + c.out = config.RecordLayer.NewLayer(c.conn, DirectionWrite) + } c.hsCtx.hIn = NewHandshakeLayerTLS(c.hsCtx, c.in) c.hsCtx.hOut = NewHandshakeLayerTLS(c.hsCtx, c.out) } else { - c.in = NewRecordLayerDTLS(c.conn, directionRead) - c.out = NewRecordLayerDTLS(c.conn, directionWrite) + c.in = NewRecordLayerDTLS(c.conn, DirectionRead) + c.out = NewRecordLayerDTLS(c.conn, DirectionWrite) c.hsCtx.hIn = NewHandshakeLayerDTLS(c.hsCtx, c.in) c.hsCtx.hOut = NewHandshakeLayerDTLS(c.hsCtx, c.out) c.hsCtx.timeoutMS = initialTimeout c.hsCtx.timers = newTimerSet() c.hsCtx.waitingNextFlight = true } - c.in.label = c.label() - c.out.label = c.label() + c.in.SetLabel(c.label()) + c.out.SetLabel(c.label()) c.hsCtx.hIn.nonblocking = c.config.NonBlocking return c } @@ -598,7 +605,7 @@ func (c *Conn) takeAction(actionGeneric HandshakeAction) Alert { logf(logTypeHandshake, "%s Rekey with data still in handshake buffers", label) return AlertDecodeError } - err := c.in.Rekey(action.epoch, action.KeySet.cipher, action.KeySet.key, action.KeySet.iv) + err := c.in.Rekey(action.epoch, action.KeySet.Cipher, &action.KeySet) if err != nil { logf(logTypeHandshake, "%s Unable to rekey inbound: %v", label, err) return AlertInternalError @@ -606,7 +613,7 @@ func (c *Conn) takeAction(actionGeneric HandshakeAction) Alert { case RekeyOut: logf(logTypeHandshake, "%s Rekeying out to %s: %+v", label, action.epoch.label(), action.KeySet) - err := c.out.Rekey(action.epoch, action.KeySet.cipher, action.KeySet.key, action.KeySet.iv) + err := c.out.Rekey(action.epoch, 
action.KeySet.Cipher, &action.KeySet) if err != nil { logf(logTypeHandshake, "%s Unable to rekey outbound: %v", label, err) return AlertInternalError @@ -906,7 +913,7 @@ func (c *Conn) Writable() bool { } // If we're a client in 0-RTT, then we're writable. - if c.isClient && c.out.cipher.epoch == EpochEarlyData { + if c.isClient && c.out.Epoch() == EpochEarlyData { return true } diff --git a/vendor/github.com/bifurcation/mint/crypto.go b/vendor/github.com/bifurcation/mint/crypto.go index ef7397d82..18ea8b37e 100644 --- a/vendor/github.com/bifurcation/mint/crypto.go +++ b/vendor/github.com/bifurcation/mint/crypto.go @@ -27,11 +27,11 @@ import ( var prng = rand.Reader -type aeadFactory func(key []byte) (cipher.AEAD, error) +type AeadFactory func(key []byte) (cipher.AEAD, error) type CipherSuiteParams struct { Suite CipherSuite - Cipher aeadFactory // Cipher factory + Cipher AeadFactory // Cipher factory Hash crypto.Hash // Hash function KeyLen int // Key length in octets IvLen int // IV length in octets @@ -546,8 +546,10 @@ const ( // opaque label<9..255>; // opaque hash_value<0..255>; // }; +var HkdfLabelPrefix = "tls13 " + func hkdfEncodeLabel(labelIn string, hashValue []byte, outLen int) []byte { - label := "tls13 " + labelIn + label := HkdfLabelPrefix + labelIn labelLen := len(label) hashLen := len(hashValue) @@ -604,18 +606,20 @@ func computeFinishedData(params CipherSuiteParams, baseKey []byte, input []byte) return mac.Sum(nil) } -type keySet struct { - cipher aeadFactory - key []byte - iv []byte +type KeySet struct { + Cipher AeadFactory + Key []byte + Iv []byte + Pn []byte } -func makeTrafficKeys(params CipherSuiteParams, secret []byte) keySet { +func makeTrafficKeys(params CipherSuiteParams, secret []byte) KeySet { logf(logTypeCrypto, "making traffic keys: secret=%x", secret) - return keySet{ - cipher: params.Cipher, - key: HkdfExpandLabel(params.Hash, secret, "key", []byte{}, params.KeyLen), - iv: HkdfExpandLabel(params.Hash, secret, "iv", []byte{}, params.IvLen), + return KeySet{ + Cipher: params.Cipher, + Key: HkdfExpandLabel(params.Hash, secret, "key", []byte{}, params.KeyLen), + Iv: HkdfExpandLabel(params.Hash, secret, "iv", []byte{}, params.IvLen), + Pn: HkdfExpandLabel(params.Hash, secret, "pn", []byte{}, params.KeyLen), } } diff --git a/vendor/github.com/bifurcation/mint/handshake-layer.go b/vendor/github.com/bifurcation/mint/handshake-layer.go index de17b30bf..170286b17 100644 --- a/vendor/github.com/bifurcation/mint/handshake-layer.go +++ b/vendor/github.com/bifurcation/mint/handshake-layer.go @@ -120,7 +120,7 @@ func (h *HandshakeLayer) HandshakeMessageFromBody(body HandshakeMessageBody) (*H type HandshakeLayer struct { ctx *HandshakeContext // The handshake we are attached to nonblocking bool // Should we operate in nonblocking mode - conn *RecordLayer // Used for reading/writing records + conn RecordLayer // Used for reading/writing records frame *frameReader // The buffered frame reader datagram bool // Is this DTLS? 
msgSeq uint32 // The DTLS message sequence number @@ -153,7 +153,7 @@ func (d handshakeLayerFrameDetails) frameLen(hdr []byte) (int, error) { return int(val), nil } -func NewHandshakeLayerTLS(c *HandshakeContext, r *RecordLayer) *HandshakeLayer { +func NewHandshakeLayerTLS(c *HandshakeContext, r RecordLayer) *HandshakeLayer { h := HandshakeLayer{} h.ctx = c h.conn = r @@ -163,7 +163,7 @@ func NewHandshakeLayerTLS(c *HandshakeContext, r *RecordLayer) *HandshakeLayer { return &h } -func NewHandshakeLayerDTLS(c *HandshakeContext, r *RecordLayer) *HandshakeLayer { +func NewHandshakeLayerDTLS(c *HandshakeContext, r RecordLayer) *HandshakeLayer { h := HandshakeLayer{} h.ctx = c h.conn = r @@ -174,8 +174,15 @@ func NewHandshakeLayerDTLS(c *HandshakeContext, r *RecordLayer) *HandshakeLayer } func (h *HandshakeLayer) readRecord() error { - logf(logTypeVerbose, "Trying to read record") - pt, err := h.conn.readRecordAnyEpoch() + var pt *TLSPlaintext + var err error + + if h.datagram { + logf(logTypeVerbose, "Trying to read record") + pt, err = h.conn.(*RecordLayerImpl).ReadRecordAnyEpoch() + } else { + pt, err = h.conn.ReadRecord() + } if err != nil { return err } @@ -204,7 +211,7 @@ func (h *HandshakeLayer) readRecord() error { } assert(h.ctx.hIn.conn != nil) - if pt.epoch != h.ctx.hIn.conn.cipher.epoch { + if pt.epoch != h.ctx.hIn.conn.Epoch() { // This is out of order but we're dropping it. // TODO(ekr@rtfm.com): If server, need to retransmit Finished. if pt.epoch == EpochClear || pt.epoch == EpochHandshakeData { @@ -394,9 +401,13 @@ func (h *HandshakeLayer) ReadMessage() (*HandshakeMessage, error) { } func (h *HandshakeLayer) QueueMessage(hm *HandshakeMessage) error { - hm.cipher = h.conn.cipher - h.queued = append(h.queued, hm) - return nil + if h.datagram { + hm.cipher = h.conn.(*RecordLayerImpl).cipher + h.queued = append(h.queued, hm) + return nil + } + _, err := h.WriteMessages([]*HandshakeMessage{hm}) + return err } func (h *HandshakeLayer) SendQueuedMessages() (int, error) { @@ -456,22 +467,30 @@ func (h *HandshakeLayer) writeFragment(hm *HandshakeMessage, start int, room int buf = body } + var err error if h.datagram { // Remember that we sent this. 
h.ctx.sentFragments = append(h.ctx.sentFragments, &SentHandshakeFragment{ hm.seq, start, len(body), - h.conn.cipher.combineSeq(true), + h.conn.(*RecordLayerImpl).cipher.combineSeq(true), false, }) + err = h.conn.(*RecordLayerImpl).writeRecordWithPadding( + &TLSPlaintext{ + contentType: RecordTypeHandshake, + fragment: buf, + }, + hm.cipher, 0) + } else { + err = h.conn.WriteRecord( + &TLSPlaintext{ + contentType: RecordTypeHandshake, + fragment: buf, + }) } - return true, start + bodylen, h.conn.writeRecordWithPadding( - &TLSPlaintext{ - contentType: RecordTypeHandshake, - fragment: buf, - }, - hm.cipher, 0) + return true, start + bodylen, err } func (h *HandshakeLayer) WriteMessage(hm *HandshakeMessage) (int, error) { diff --git a/vendor/github.com/bifurcation/mint/handshake-messages.go b/vendor/github.com/bifurcation/mint/handshake-messages.go index 5a229f1d0..e8e3aef8e 100644 --- a/vendor/github.com/bifurcation/mint/handshake-messages.go +++ b/vendor/github.com/bifurcation/mint/handshake-messages.go @@ -406,7 +406,7 @@ func (cr *CertificateRequestBody) Unmarshal(data []byte) (int, error) { type NewSessionTicketBody struct { TicketLifetime uint32 TicketAgeAdd uint32 - TicketNonce []byte `tls:"head=1,min=1"` + TicketNonce []byte `tls:"head=1"` Ticket []byte `tls:"head=2,min=1"` Extensions ExtensionList `tls:"head=2"` } diff --git a/vendor/github.com/bifurcation/mint/mint.svg b/vendor/github.com/bifurcation/mint/mint.svg deleted file mode 100644 index ae32703d1..000000000 --- a/vendor/github.com/bifurcation/mint/mint.svg +++ /dev/null @@ -1,101 +0,0 @@ diff --git a/vendor/github.com/bifurcation/mint/negotiation.go index 2c80b8d74..74639538c 100644 --- a/vendor/github.com/bifurcation/mint/negotiation.go +++ b/vendor/github.com/bifurcation/mint/negotiation.go @@ -9,9 +9,9 @@ import ( ) func VersionNegotiation(offered, supported []uint16) (bool, uint16) { for _, offeredVersion := range offered { - for _, supportedVersion := range supported { - logf(logTypeHandshake, "[server] version offered by client [%04x] <> [%04x]", offeredVersion, supportedVersion) - if offeredVersion == supportedVersion { + for _, tls13Version := range supported { + logf(logTypeHandshake, "[server] version offered by client [%04x] <> [%04x]", offeredVersion, tls13Version) + if offeredVersion == tls13Version { // XXX: Should probably be highest supported version, but for now, we // only support one version, so it doesn't really matter. 
return true, offeredVersion diff --git a/vendor/github.com/bifurcation/mint/record-layer.go b/vendor/github.com/bifurcation/mint/record-layer.go index 5cf8ae2cb..338f339ff 100644 --- a/vendor/github.com/bifurcation/mint/record-layer.go +++ b/vendor/github.com/bifurcation/mint/record-layer.go @@ -20,11 +20,11 @@ func (err DecryptError) Error() string { return string(err) } -type direction uint8 +type Direction uint8 const ( - directionWrite = direction(1) - directionRead = direction(2) + DirectionWrite = Direction(1) + DirectionRead = Direction(2) ) // struct { @@ -42,6 +42,18 @@ type TLSPlaintext struct { fragment []byte } +func NewTLSPlaintext(ct RecordType, epoch Epoch, fragment []byte) *TLSPlaintext { + return &TLSPlaintext{ + contentType: ct, + epoch: epoch, + fragment: fragment, + } +} + +func (t TLSPlaintext) Fragment() []byte { + return t.fragment +} + type cipherState struct { epoch Epoch // DTLS epoch ivLength int // Length of the seq and nonce fields @@ -50,10 +62,28 @@ type cipherState struct { cipher cipher.AEAD // AEAD cipher } -type RecordLayer struct { +type RecordLayerFactory interface { + NewLayer(conn io.ReadWriter, dir Direction) RecordLayer +} + +type RecordLayer interface { + Lock() + Unlock() + SetVersion(v uint16) + SetLabel(s string) + Rekey(epoch Epoch, factory AeadFactory, keys *KeySet) error + ResetClear(seq uint64) + DiscardReadKey(epoch Epoch) + PeekRecordType(block bool) (RecordType, error) + ReadRecord() (*TLSPlaintext, error) + WriteRecord(pt *TLSPlaintext) error + Epoch() Epoch +} + +type RecordLayerImpl struct { sync.Mutex label string - direction direction + direction Direction version uint16 // The current version number conn io.ReadWriter // The underlying connection frame *frameReader // The buffered frame reader @@ -67,6 +97,10 @@ type RecordLayer struct { datagram bool } +func (r *RecordLayerImpl) Impl() *RecordLayerImpl { + return r +} + type recordLayerFrameDetails struct { datagram bool } @@ -90,7 +124,7 @@ func newCipherStateNull() *cipherState { return &cipherState{EpochClear, 0, 0, nil, nil} } -func newCipherStateAead(epoch Epoch, factory aeadFactory, key []byte, iv []byte) (*cipherState, error) { +func newCipherStateAead(epoch Epoch, factory AeadFactory, key []byte, iv []byte) (*cipherState, error) { cipher, err := factory(key) if err != nil { return nil, err @@ -99,8 +133,8 @@ func newCipherStateAead(epoch Epoch, factory aeadFactory, key []byte, iv []byte) return &cipherState{epoch, len(iv), 0, iv, cipher}, nil } -func NewRecordLayerTLS(conn io.ReadWriter, dir direction) *RecordLayer { - r := RecordLayer{} +func NewRecordLayerTLS(conn io.ReadWriter, dir Direction) *RecordLayerImpl { + r := RecordLayerImpl{} r.label = "" r.direction = dir r.conn = conn @@ -110,8 +144,8 @@ func NewRecordLayerTLS(conn io.ReadWriter, dir direction) *RecordLayer { return &r } -func NewRecordLayerDTLS(conn io.ReadWriter, dir direction) *RecordLayer { - r := RecordLayer{} +func NewRecordLayerDTLS(conn io.ReadWriter, dir Direction) *RecordLayerImpl { + r := RecordLayerImpl{} r.label = "" r.direction = dir r.conn = conn @@ -123,29 +157,37 @@ func NewRecordLayerDTLS(conn io.ReadWriter, dir direction) *RecordLayer { return &r } -func (r *RecordLayer) SetVersion(v uint16) { +func (r *RecordLayerImpl) SetVersion(v uint16) { r.version = v } -func (r *RecordLayer) ResetClear(seq uint64) { +func (r *RecordLayerImpl) ResetClear(seq uint64) { r.cipher = newCipherStateNull() r.cipher.seq = seq } -func (r *RecordLayer) Rekey(epoch Epoch, factory aeadFactory, key []byte, iv 
[]byte) error { - cipher, err := newCipherStateAead(epoch, factory, key, iv) +func (r *RecordLayerImpl) Epoch() Epoch { + return r.cipher.epoch +} + +func (r *RecordLayerImpl) SetLabel(s string) { + r.label = s +} + +func (r *RecordLayerImpl) Rekey(epoch Epoch, factory AeadFactory, keys *KeySet) error { + cipher, err := newCipherStateAead(epoch, factory, keys.Key, keys.Iv) if err != nil { return err } r.cipher = cipher - if r.datagram && r.direction == directionRead { + if r.datagram && r.direction == DirectionRead { r.readCiphers[epoch] = cipher } return nil } // TODO(ekr@rtfm.com): This is never used, which is a bug. -func (r *RecordLayer) DiscardReadKey(epoch Epoch) { +func (r *RecordLayerImpl) DiscardReadKey(epoch Epoch) { if !r.datagram { return } @@ -197,34 +239,29 @@ func (c *cipherState) overhead() int { return c.cipher.Overhead() } -func (r *RecordLayer) encrypt(cipher *cipherState, seq uint64, pt *TLSPlaintext, padLen int) *TLSPlaintext { - assert(r.direction == directionWrite) +func (r *RecordLayerImpl) encrypt(cipher *cipherState, seq uint64, header []byte, pt *TLSPlaintext, padLen int) []byte { + assert(r.direction == DirectionWrite) logf(logTypeIO, "%s Encrypt seq=[%x]", r.label, seq) // Expand the fragment to hold contentType, padding, and overhead originalLen := len(pt.fragment) plaintextLen := originalLen + 1 + padLen ciphertextLen := plaintextLen + cipher.overhead() - // Assemble the revised plaintext - out := &TLSPlaintext{ - - contentType: RecordTypeApplicationData, - fragment: make([]byte, ciphertextLen), - } - copy(out.fragment, pt.fragment) - out.fragment[originalLen] = byte(pt.contentType) + ciphertext := make([]byte, ciphertextLen) + copy(ciphertext, pt.fragment) + ciphertext[originalLen] = byte(pt.contentType) for i := 1; i <= padLen; i++ { - out.fragment[originalLen+i] = 0 + ciphertext[originalLen+i] = 0 } // Encrypt the fragment - payload := out.fragment[:plaintextLen] - cipher.cipher.Seal(payload[:0], cipher.computeNonce(seq), payload, nil) - return out + payload := ciphertext[:plaintextLen] + cipher.cipher.Seal(payload[:0], cipher.computeNonce(seq), payload, header) + return ciphertext } -func (r *RecordLayer) decrypt(pt *TLSPlaintext, seq uint64) (*TLSPlaintext, int, error) { - assert(r.direction == directionRead) +func (r *RecordLayerImpl) decrypt(seq uint64, header []byte, pt *TLSPlaintext) (*TLSPlaintext, int, error) { + assert(r.direction == DirectionRead) logf(logTypeIO, "%s Decrypt seq=[%x]", r.label, seq) if len(pt.fragment) < r.cipher.overhead() { msg := fmt.Sprintf("tls.record.decrypt: Record too short [%d] < [%d]", len(pt.fragment), r.cipher.overhead()) @@ -238,7 +275,7 @@ func (r *RecordLayer) decrypt(pt *TLSPlaintext, seq uint64) (*TLSPlaintext, int, } // Decrypt - _, err := r.cipher.cipher.Open(out.fragment[:0], r.cipher.computeNonce(seq), pt.fragment, nil) + _, err := r.cipher.cipher.Open(out.fragment[:0], r.cipher.computeNonce(seq), pt.fragment, header) if err != nil { logf(logTypeIO, "%s AEAD decryption failure [%x]", r.label, pt) return nil, 0, DecryptError("tls.record.decrypt: AEAD decrypt failed") @@ -259,7 +296,7 @@ func (r *RecordLayer) decrypt(pt *TLSPlaintext, seq uint64) (*TLSPlaintext, int, return out, padLen, nil } -func (r *RecordLayer) PeekRecordType(block bool) (RecordType, error) { +func (r *RecordLayerImpl) PeekRecordType(block bool) (RecordType, error) { var pt *TLSPlaintext var err error @@ -275,7 +312,7 @@ func (r *RecordLayer) PeekRecordType(block bool) (RecordType, error) { return pt.contentType, nil } -func (r 
*RecordLayer) ReadRecord() (*TLSPlaintext, error) { +func (r *RecordLayerImpl) ReadRecord() (*TLSPlaintext, error) { pt, err := r.nextRecord(false) // Consume the cached record if there was one @@ -285,7 +322,7 @@ func (r *RecordLayer) ReadRecord() (*TLSPlaintext, error) { return pt, err } -func (r *RecordLayer) readRecordAnyEpoch() (*TLSPlaintext, error) { +func (r *RecordLayerImpl) ReadRecordAnyEpoch() (*TLSPlaintext, error) { pt, err := r.nextRecord(true) // Consume the cached record if there was one @@ -295,7 +332,7 @@ func (r *RecordLayer) readRecordAnyEpoch() (*TLSPlaintext, error) { return pt, err } -func (r *RecordLayer) nextRecord(allowOldEpoch bool) (*TLSPlaintext, error) { +func (r *RecordLayerImpl) nextRecord(allowOldEpoch bool) (*TLSPlaintext, error) { cipher := r.cipher if r.cachedRecord != nil { logf(logTypeIO, "%s Returning cached record", r.label) @@ -391,7 +428,7 @@ func (r *RecordLayer) nextRecord(allowOldEpoch bool) (*TLSPlaintext, error) { if cipher.cipher != nil { logf(logTypeIO, "%s RecordLayer.ReadRecord epoch=[%s] seq=[%x] [%d] ciphertext=[%x]", r.label, cipher.epoch.label(), seq, pt.contentType, pt.fragment) - pt, _, err = r.decrypt(pt, seq) + pt, _, err = r.decrypt(seq, header, pt) if err != nil { logf(logTypeIO, "%s Decryption failed", r.label) return nil, err @@ -411,46 +448,58 @@ func (r *RecordLayer) nextRecord(allowOldEpoch bool) (*TLSPlaintext, error) { return pt, nil } -func (r *RecordLayer) WriteRecord(pt *TLSPlaintext) error { +func (r *RecordLayerImpl) WriteRecord(pt *TLSPlaintext) error { return r.writeRecordWithPadding(pt, r.cipher, 0) } -func (r *RecordLayer) WriteRecordWithPadding(pt *TLSPlaintext, padLen int) error { +func (r *RecordLayerImpl) WriteRecordWithPadding(pt *TLSPlaintext, padLen int) error { return r.writeRecordWithPadding(pt, r.cipher, padLen) } -func (r *RecordLayer) writeRecordWithPadding(pt *TLSPlaintext, cipher *cipherState, padLen int) error { +func (r *RecordLayerImpl) writeRecordWithPadding(pt *TLSPlaintext, cipher *cipherState, padLen int) error { seq := cipher.combineSeq(r.datagram) - if cipher.cipher != nil { - logf(logTypeIO, "%s RecordLayer.WriteRecord epoch=[%s] seq=[%x] [%d] plaintext=[%x]", r.label, cipher.epoch.label(), cipher.seq, pt.contentType, pt.fragment) - pt = r.encrypt(cipher, seq, pt, padLen) - } else if padLen > 0 { - return fmt.Errorf("tls.record: Padding can only be done on encrypted records") - } - - if len(pt.fragment) > maxFragmentLen { - return fmt.Errorf("tls.record: Record size too big") - } - length := len(pt.fragment) + var contentType RecordType + if cipher.cipher != nil { + length += 1 + padLen + cipher.cipher.Overhead() + contentType = RecordTypeApplicationData + } else { + contentType = pt.contentType + } var header []byte if !r.datagram { - header = []byte{byte(pt.contentType), + header = []byte{byte(contentType), byte(r.version >> 8), byte(r.version & 0xff), byte(length >> 8), byte(length)} } else { header = make([]byte, 13) version := dtlsConvertVersion(r.version) - copy(header, []byte{byte(pt.contentType), + copy(header, []byte{byte(contentType), byte(version >> 8), byte(version & 0xff), }) encodeUint(seq, 8, header[3:]) encodeUint(uint64(length), 2, header[11:]) } - record := append(header, pt.fragment...) 
- logf(logTypeIO, "%s RecordLayer.WriteRecord epoch=[%s] seq=[%x] [%d] ciphertext=[%x]", r.label, cipher.epoch.label(), cipher.seq, pt.contentType, pt.fragment) + var ciphertext []byte + if cipher.cipher != nil { + logf(logTypeIO, "%s RecordLayer.WriteRecord epoch=[%s] seq=[%x] [%d] plaintext=[%x]", r.label, cipher.epoch.label(), cipher.seq, pt.contentType, pt.fragment) + ciphertext = r.encrypt(cipher, seq, header, pt, padLen) + } else { + if padLen > 0 { + return fmt.Errorf("tls.record: Padding can only be done on encrypted records") + } + ciphertext = pt.fragment + } + + if len(ciphertext) > maxFragmentLen { + return fmt.Errorf("tls.record: Record size too big") + } + + record := append(header, ciphertext...) + + logf(logTypeIO, "%s RecordLayer.WriteRecord epoch=[%s] seq=[%x] [%d] ciphertext=[%x]", r.label, cipher.epoch.label(), cipher.seq, contentType, ciphertext) cipher.incrementSequenceNumber() _, err := r.conn.Write(record) diff --git a/vendor/github.com/bifurcation/mint/server-state-machine.go b/vendor/github.com/bifurcation/mint/server-state-machine.go index f91b22e4c..e392a5d2e 100644 --- a/vendor/github.com/bifurcation/mint/server-state-machine.go +++ b/vendor/github.com/bifurcation/mint/server-state-machine.go @@ -163,7 +163,7 @@ func (state serverStateStart) Next(hr handshakeMessageReader) (HandshakeState, [ logf(logTypeHandshake, "[ServerStateStart] Client did not send supported_versions") return nil, nil, AlertProtocolVersion } - versionOK, _ := VersionNegotiation(supportedVersions.Versions, []uint16{supportedVersion}) + versionOK, _ := VersionNegotiation(supportedVersions.Versions, []uint16{tls13Version}) if !versionOK { logf(logTypeHandshake, "[ServerStateStart] Client does not support the same version") return nil, nil, AlertProtocolVersion @@ -416,7 +416,7 @@ func (state *serverStateStart) generateHRR(cs CipherSuite, legacySessionId []byt sv := &SupportedVersionsExtension{ HandshakeType: HandshakeTypeServerHello, - Versions: []uint16{supportedVersion}, + Versions: []uint16{tls13Version}, } if err := hrr.Extensions.Add(sv); err != nil { @@ -483,7 +483,7 @@ func (state serverStateNegotiated) Next(_ handshakeMessageReader) (HandshakeStat err := sh.Extensions.Add(&SupportedVersionsExtension{ HandshakeType: HandshakeTypeServerHello, - Versions: []uint16{supportedVersion}, + Versions: []uint16{tls13Version}, }) if err != nil { logf(logTypeHandshake, "[ServerStateNegotiated] Error adding supported_versions extension [%v]", err) @@ -785,7 +785,7 @@ func (state serverStateWaitEOED) State() State { func (state serverStateWaitEOED) Next(hr handshakeMessageReader) (HandshakeState, []HandshakeAction, Alert) { for { logf(logTypeHandshake, "Server reading early data...") - assert(state.hsCtx.hIn.conn.cipher.epoch == EpochEarlyData) + assert(state.hsCtx.hIn.conn.Epoch() == EpochEarlyData) t, err := state.hsCtx.hIn.conn.PeekRecordType(!state.hsCtx.hIn.nonblocking) if err == AlertWouldBlock { return nil, nil, AlertWouldBlock diff --git a/vendor/github.com/bifurcation/mint/state-machine.go b/vendor/github.com/bifurcation/mint/state-machine.go index 558b76cc5..f0635c8aa 100644 --- a/vendor/github.com/bifurcation/mint/state-machine.go +++ b/vendor/github.com/bifurcation/mint/state-machine.go @@ -19,12 +19,12 @@ type SendEarlyData struct{} type RekeyIn struct { epoch Epoch - KeySet keySet + KeySet KeySet } type RekeyOut struct { epoch Epoch - KeySet keySet + KeySet KeySet } type ResetOut struct { @@ -111,7 +111,7 @@ func (state stateConnected) State() State { } func (state *stateConnected) 
KeyUpdate(request KeyUpdateRequest) ([]HandshakeAction, Alert) { - var trafficKeys keySet + var trafficKeys KeySet if state.isClient { state.clientTrafficSecret = HkdfExpandLabel(state.cryptoParams.Hash, state.clientTrafficSecret, labelClientApplicationTrafficSecret, []byte{}, state.cryptoParams.Hash.Size()) @@ -196,7 +196,7 @@ func (state stateConnected) ProcessMessage(hm *HandshakeMessage) (HandshakeState switch body := bodyGeneric.(type) { case *KeyUpdateBody: - var trafficKeys keySet + var trafficKeys KeySet if !state.isClient { state.clientTrafficSecret = HkdfExpandLabel(state.cryptoParams.Hash, state.clientTrafficSecret, labelClientApplicationTrafficSecret, []byte{}, state.cryptoParams.Hash.Size()) diff --git a/vendor/github.com/bifurcation/mint/syntax/README.md b/vendor/github.com/bifurcation/mint/syntax/README.md deleted file mode 100644 index 537b9b4e8..000000000 --- a/vendor/github.com/bifurcation/mint/syntax/README.md +++ /dev/null @@ -1,84 +0,0 @@ -TLS Syntax -========== - -TLS defines [its own syntax](https://tlswg.github.io/tls13-spec/#rfc.section.3) -for describing structures used in that protocol. To facilitate experimentation -with TLS in Go, this module maps that syntax to the Go structure syntax, taking -advantage of Go's type annotations to encode non-type information carried in the -TLS presentation format. - -For example, in the TLS specification, a ClientHello message has the following -structure: - -~~~~~ -uint16 ProtocolVersion; -opaque Random[32]; -uint8 CipherSuite[2]; -enum { ... (65535)} ExtensionType; - -struct { - ExtensionType extension_type; - opaque extension_data<0..2^16-1>; -} Extension; - -struct { - ProtocolVersion legacy_version = 0x0303; /* TLS v1.2 */ - Random random; - opaque legacy_session_id<0..32>; - CipherSuite cipher_suites<2..2^16-2>; - opaque legacy_compression_methods<1..2^8-1>; - Extension extensions<0..2^16-1>; -} ClientHello; -~~~~~ - -This maps to the following Go type definitions: - -~~~~~ -type protocolVersion uint16 -type random [32]byte -type cipherSuite uint16 // or [2]byte -type extensionType uint16 - -type extension struct { - ExtensionType extensionType - ExtensionData []byte `tls:"head=2"` -} - -type clientHello struct { - LegacyVersion protocolVersion - Random random - LegacySessionID []byte `tls:"head=1,max=32"` - CipherSuites []cipherSuite `tls:"head=2,min=2"` - LegacyCompressionMethods []byte `tls:"head=1,min=1"` - Extensions []extension `tls:"head=2"` -} -~~~~~ - -Then you can just declare, marshal, and unmarshal structs just like you would -with, say JSON. - -The available annotations right now are all related to vectors: - -* `head`: The number of bytes of length to use as a "header" -* `min`: The minimum length of the vector, in bytes -* `max`: The maximum length of the vector, in bytes - -## Not supported - -* The `select()` syntax for creating alternate version of the same struct (see, - e.g., the KeyShare extension) - -* The backreference syntax for array lengths or select parameters, as in `opaque - fragment[TLSPlaintext.length]`. Note, however, that in cases where the length - immediately preceds the array, these can be reframed as vectors with - appropriate sizes. - - -QUIC Extensions Syntax -====================== -syntax also supports some minor extensions to allow implementing QUIC. - -* The `varint` annotation describes a QUIC-style varint -* `head=none` means no header, i.e., the bytes are encoded directly on the wire. - On reading, the decoder will consume all available data. 
-* `head=varint` means to encode the header as a varint diff --git a/vendor/github.com/cenkalti/backoff/.gitignore b/vendor/github.com/cenkalti/backoff/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/cenkalti/backoff/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/cenkalti/backoff/.travis.yml b/vendor/github.com/cenkalti/backoff/.travis.yml deleted file mode 100644 index 47a6a46ec..000000000 --- a/vendor/github.com/cenkalti/backoff/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.7 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/README.md b/vendor/github.com/cenkalti/backoff/README.md deleted file mode 100644 index 55ebc98fc..000000000 --- a/vendor/github.com/cenkalti/backoff/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] - -This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. - -[Exponential backoff][exponential backoff wiki] -is an algorithm that uses feedback to multiplicatively decrease the rate of some process, -in order to gradually find an acceptable rate. -The retries exponentially increase and stop increasing when a certain threshold is met. - -## Usage - -See https://godoc.org/github.com/cenkalti/backoff#pkg-examples - -## Contributing - -* I would like to keep this library as small as possible. -* Please don't send a PR without opening an issue and discussing it first. -* If proposed change is not a common use case, I will probably not accept it. - -[godoc]: https://godoc.org/github.com/cenkalti/backoff -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master - -[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java -[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff - -[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index c83641619..bc52e96f2 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -2,7 +2,7 @@ ISC License Copyright (c) 2012-2016 Dave Collins -Permission to use, copy, modify, and distribute this software for any +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. 
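Stepping back to the mint `syntax/README.md` removed a little above: a minimal, hypothetical sketch of how those `tls:"head=..."` annotations are typically used. It assumes the vendored package's exported `Marshal` and `Unmarshal` helpers, and the extension type and byte values are made up purely for illustration.

```go
package main

import (
	"fmt"

	"github.com/bifurcation/mint/syntax"
)

// Extension mirrors the TLS presentation syntax from the removed README:
// a 2-byte type followed by an opaque vector with a 2-byte length header.
type Extension struct {
	ExtensionType uint16
	ExtensionData []byte `tls:"head=2"`
}

func main() {
	ext := Extension{ExtensionType: 0x002b, ExtensionData: []byte{0x03, 0x04}}

	// Marshal produces the TLS wire encoding (type, length header, body).
	encoded, err := syntax.Marshal(ext)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded: %x\n", encoded)

	// Unmarshal fills the annotated struct and reports how many bytes it consumed.
	var decoded Extension
	if _, err := syntax.Unmarshal(encoded, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("decoded: %+v\n", decoded)
}
```

The struct tag carries everything the encoder needs, so no hand-written (un)marshalling code is required for plain vectors like this.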
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 8a4a6589a..792994785 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -16,7 +16,9 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 package spew @@ -34,80 +36,49 @@ const ( ptrSize = unsafe.Sizeof((*byte)(nil)) ) -var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) +type flag uintptr - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag ) -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. 
- upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. 
+ flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return } } - - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() - } - return rv + panic("reflect.Value read-only flag has changed semantics") } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 1fe3cf3d5..205c28d68 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -16,7 +16,7 @@ // when the code is running on Google App Engine, compiled by GopherJS, or // "-tags safe" is added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe +// +build js appengine safe disableunsafe !go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index df1d582a7..f78d89fc1 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -35,16 +35,16 @@ var ( // cCharRE is a regular expression that matches a cgo char. // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) // cUnsignedCharRE is a regular expression that matches a cgo unsigned // char. It is used to detect unsigned character arrays to hexdump // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) // cUint8tCharRE is a regular expression that matches a cgo uint8_t. // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) ) // dumpState contains information about the state of a dump operation. @@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) { // Display dereferenced value. d.w.Write(openParenBytes) switch { - case nilFound == true: + case nilFound: d.w.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: d.w.Write(circularBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index c49875bac..b04edb7d7 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) { // Display dereferenced value. 
switch { - case nilFound == true: + case nilFound: f.fs.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: f.fs.Write(circularShortBytes) default: diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore deleted file mode 100644 index 80bed650e..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -bin - - diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml deleted file mode 100644 index 1027f56cd..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -script: - - go vet ./... - - go test -v ./... - -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md deleted file mode 100644 index 7fc1f793c..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md +++ /dev/null @@ -1,97 +0,0 @@ -## Migration Guide from v2 -> v3 - -Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. - -### `Token.Claims` is now an interface type - -The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. - -`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. - -The old example for parsing a token looked like this.. - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is now directly mapped to... - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. - -```go - type MyCustomClaims struct { - User string - *StandardClaims - } - - if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { - claims := token.Claims.(*MyCustomClaims) - fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) - } -``` - -### `ParseFromRequest` has been moved - -To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`. - -`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
- -This simple parsing example: - -```go - if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is directly mapped to: - -```go - if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -There are several concrete `Extractor` types provided for your convenience: - -* `HeaderExtractor` will search a list of headers until one contains content. -* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. -* `MultiExtractor` will try a list of `Extractors` in order until one returns content. -* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. -* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument -* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header - - -### RSA signing methods no longer accept `[]byte` keys - -Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. - -To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. - -```go - func keyLookupFunc(*Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - - // Look up key - key, err := lookupPublicKey(token.Header["kid"]) - if err != nil { - return nil, err - } - - // Unpack key from PEM encoded PKCS8 - return jwt.ParseRSAPublicKeyFromPEM(key) - } -``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md deleted file mode 100644 index d358d881b..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# jwt-go - -[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) -[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) - -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) - -**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. 
If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3. - -**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. - -**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. - -## What the heck is a JWT? - -JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. - -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. - -The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. - -## Examples - -See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: - -* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) -* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) -* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) - -## Extensions - -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. - -Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go - -## Compliance - -This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: - -* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`.
Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). - -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. - -**BREAKING CHANGES:** -* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. - -## Usage Tips - -### Signing vs Encryption - -A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data: - -* The author of the token was in possession of the signing secret -* The data has not been modified since it was signed - -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. - -### Choosing a Signing Method - -There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. - -Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. - -Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. - -### Signing Methods and Key Types - -Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: - -* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation -* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation - -### JWT and OAuth - -It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
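Before moving on, a minimal sketch that ties the key-type guidance above together for the vendored v3 API: signing and then verifying an HS256 token. The secret and claim values here are illustrative, not anything used by this repository.

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("illustrative-secret") // HMAC methods take []byte keys

	// Build and sign a token with an example claim.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"user": "alice"})
	signed, err := token.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Verify: check the signing method before handing back the key,
	// as the security notice above recommends.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", parsed.Valid, "user:", parsed.Claims.(jwt.MapClaims)["user"])
}
```

Swapping in RS256 would only change the method constant and the key types (`*rsa.PrivateKey` to sign, `*rsa.PublicKey` to verify), as the list above notes.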
- -Without going too far down the rabbit hole, here's a description of the interaction of these technologies: - -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. -* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. -* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - -## More - -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). - -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md deleted file mode 100644 index 637029831..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ /dev/null @@ -1,118 +0,0 @@ -## `jwt-go` Version History - -#### 3.2.0 - -* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation -* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate -* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. -* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. - -#### 3.1.0 - -* Improvements to `jwt` command line tool -* Added `SkipClaimsValidation` option to `Parser` -* Documentation updates - -#### 3.0.0 - -* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code - * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. - * `ParseFromRequest` has been moved to `request` subpackage and usage has changed - * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. -* Other Additions and Changes - * Added `Claims` interface type to allow users to decode the claims into a custom type - * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. - * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage - * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` - * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. 
- * Added several new, more specific, validation errors to error type bitmask - * Moved examples from README to executable example files - * Signing method registry is now thread safe - * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) - -#### 2.7.0 - -This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. - -* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying -* Error text for expired tokens includes how long it's been expired -* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` -* Documentation updates - -#### 2.6.0 - -* Exposed inner error within ValidationError -* Fixed validation errors when using UseJSONNumber flag -* Added several unit tests - -#### 2.5.0 - -* Added support for signing method none. You shouldn't use this. The API tries to make this clear. -* Updated/fixed some documentation -* Added more helpful error message when trying to parse tokens that begin with `BEARER ` - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. Anything outside this set will be rejected. - * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. - -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. - -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. 
Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md deleted file mode 100644 index 8954ed017..000000000 --- a/vendor/github.com/docker/docker/api/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Working on the Engine API - -The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. - -It consists of various components in this repository: - -- `api/swagger.yaml` A Swagger definition of the API. -- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. -- `cli/` The command-line client. -- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. -- `daemon/` The daemon, which serves the API. - -## Swagger definition - -The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: - -1. To automatically generate documentation. -2. To automatically generate the Go server and client. (A work-in-progress.) -3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. - -## Updating the API documentation - -The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation. - -The file is split into two main sections: - -- `definitions`, which defines re-usable objects used in requests and responses -- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) - -To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. - -There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. 
adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919) - -`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful for when you are making edits to ensure you are doing the right thing. - -## Viewing the API documentation - -When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. - -Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. - -The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml deleted file mode 100644 index f07a02737..000000000 --- a/vendor/github.com/docker/docker/api/swagger-gen.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -layout: - models: - - name: definition - source: asset:model - target: "{{ joinFilePath .Target .ModelPackage }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" - operations: - - name: handler - source: asset:serverOperation - target: "{{ joinFilePath .Target .APIPackage .Package }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml deleted file mode 100644 index 36927b5a0..000000000 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ /dev/null @@ -1,8238 +0,0 @@ -# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. -# -# This is used for generating API documentation and the types used by the -# client/server. See api/README.md for more information. -# -# Some style notes: -# - This file is used by ReDoc, which allows GitHub Flavored Markdown in -# descriptions. -# - There is no maximum line length, for ease of editing and pretty diffs. -# - operationIds are in the format "NounVerb", with a singular noun. - -swagger: "2.0" -schemes: - - "http" - - "https" -produces: - - "application/json" - - "text/plain" -consumes: - - "application/json" - - "text/plain" -basePath: "/v1.29" -info: - title: "Docker Engine API" - version: "1.29" - x-logo: - url: "https://docs.docker.com/images/logo-docker-main.png" - description: | - The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. - - Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. - - # Errors - - The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: - - ``` - { - "message": "page not found" - } - ``` - - # Versioning - - The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break. - - For Docker Engine 17.05, the API version is 1.29. To lock to this version, you prefix the URL with `/v1.29`. For example, calling `/info` is the same as calling `/v1.29/info`. 
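The version-prefix rule above maps directly onto request paths. A small Go sketch, not taken from the vendored docs, assuming the daemon is listening on the conventional local socket path (which may differ on your host):

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
)

func main() {
	// Dial the daemon's unix socket directly; the host in the URL below is a
	// placeholder because this DialContext ignores it.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}

	// /v1.29/info is the version-pinned form of /info described above.
	resp, err := client.Get("http://docker/v1.29/info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, "-", len(body), "bytes of JSON")
}
```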
- - Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. - - In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated will be removed in a future version of Docker. - - The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons. - - This documentation is for version 1.29 of the API, which was introduced with Docker 17.05. Use this table to find documentation for previous versions of the API: - - Docker version | API version | Changes - ----------------|-------------|--------- - 17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes) - 17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes) - 1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes) - 1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes) - 1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes) - 1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes) - 1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes) - 1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes) - 1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes) - 1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes) - 1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes) - - # Authentication - - Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: - - ``` - { - "username": "string", - "password": "string", - "email": "string", - "serveraddress": "string" - } - ``` - - The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. - - If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: - - ``` - { - "identitytoken": "9cbaf023786cd7..." - } - ``` - -# The tags on paths define the menu sections in the ReDoc documentation, so -# the usage of tags must make sense for that: -# - They should be singular, not plural. 
-# - There should not be too many tags, or the menu becomes unwieldy. For -# example, it is preferable to add a path to the "System" tag instead of -# creating a tag with a single path in it. -# - The order of tags in this list defines the order in the menu. -tags: - # Primary objects - - name: "Container" - x-displayName: "Containers" - description: | - Create and manage containers. - - name: "Image" - x-displayName: "Images" - - name: "Network" - x-displayName: "Networks" - description: | - Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. - - name: "Volume" - x-displayName: "Volumes" - description: | - Create and manage persistent storage that can be attached to containers. - - name: "Exec" - x-displayName: "Exec" - description: | - Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. - - To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. - # Swarm things - - name: "Swarm" - x-displayName: "Swarm" - description: | - Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. - - name: "Node" - x-displayName: "Nodes" - description: | - Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Service" - x-displayName: "Services" - description: | - Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Task" - x-displayName: "Tasks" - description: | - A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Secret" - x-displayName: "Secrets" - description: | - Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. 
- # System things - - name: "Plugin" - x-displayName: "Plugins" - - name: "System" - x-displayName: "System" - -definitions: - Port: - type: "object" - description: "An open port on a container" - required: [PrivatePort, Type] - properties: - IP: - type: "string" - format: "ip-address" - PrivatePort: - type: "integer" - format: "uint16" - x-nullable: false - description: "Port on the container" - PublicPort: - type: "integer" - format: "uint16" - description: "Port exposed on the host" - Type: - type: "string" - x-nullable: false - enum: ["tcp", "udp"] - example: - PrivatePort: 8080 - PublicPort: 80 - Type: "tcp" - - MountPoint: - type: "object" - description: "A mount point inside a container" - properties: - Type: - type: "string" - Name: - type: "string" - Source: - type: "string" - Destination: - type: "string" - Driver: - type: "string" - Mode: - type: "string" - RW: - type: "boolean" - Propagation: - type: "string" - - DeviceMapping: - type: "object" - description: "A device mapping between the host and container" - properties: - PathOnHost: - type: "string" - PathInContainer: - type: "string" - CgroupPermissions: - type: "string" - example: - PathOnHost: "/dev/deviceName" - PathInContainer: "/dev/deviceName" - CgroupPermissions: "mrw" - - ThrottleDevice: - type: "object" - properties: - Path: - description: "Device path" - type: "string" - Rate: - description: "Rate" - type: "integer" - format: "int64" - minimum: 0 - - Mount: - type: "object" - properties: - Target: - description: "Container path." - type: "string" - Source: - description: "Mount source (e.g. a volume name, a host path)." - Type: - description: | - The mount type. Available types: - - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - ReadOnly: - description: "Whether the mount should be read-only." - type: "boolean" - Consistency: - description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." - type: "string" - BindOptions: - description: "Optional configuration for the `bind` type." - type: "object" - properties: - Propagation: - description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." - enum: - - "private" - - "rprivate" - - "shared" - - "rshared" - - "slave" - - "rslave" - VolumeOptions: - description: "Optional configuration for the `volume` type." - type: "object" - properties: - NoCopy: - description: "Populate volume with data from the target." - type: "boolean" - default: false - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - DriverConfig: - description: "Map of driver specific options" - type: "object" - properties: - Name: - description: "Name of the driver to use to create the volume." - type: "string" - Options: - description: "key/value map of driver specific options." - type: "object" - additionalProperties: - type: "string" - TmpfsOptions: - description: "Optional configuration for the `tmpfs` type." - type: "object" - properties: - SizeBytes: - description: "The size for the tmpfs mount in bytes." 
- type: "integer" - format: "int64" - Mode: - description: "The permission mode for the tmpfs mount in an integer." - type: "integer" - RestartPolicy: - description: | - The behavior to apply when the container exits. The default is not to restart. - - An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. - type: "object" - properties: - Name: - type: "string" - description: | - - `always` Always restart - - `unless-stopped` Restart always except when the user has manually stopped the container - - `on-failure` Restart only when the container exit code is non-zero - enum: - - "always" - - "unless-stopped" - - "on-failure" - MaximumRetryCount: - type: "integer" - description: "If `on-failure` is used, the number of times to retry before giving up" - default: {} - - Resources: - description: "A container's resources (cgroups config, ulimits, etc)" - type: "object" - properties: - # Applicable to all platforms - CpuShares: - description: "An integer value representing this container's relative CPU weight versus other containers." - type: "integer" - Memory: - description: "Memory limit in bytes." - type: "integer" - default: 0 - # Applicable to UNIX platforms - CgroupParent: - description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." - type: "string" - BlkioWeight: - description: "Block IO weight (relative weight)." - type: "integer" - minimum: 0 - maximum: 1000 - BlkioWeightDevice: - description: | - Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. - type: "array" - items: - type: "object" - properties: - Path: - type: "string" - Weight: - type: "integer" - minimum: 0 - BlkioDeviceReadBps: - description: | - Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteBps: - description: | - Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceReadIOps: - description: | - Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteIOps: - description: | - Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - CpuPeriod: - description: "The length of a CPU period in microseconds." - type: "integer" - format: "int64" - CpuQuota: - description: "Microseconds of CPU time that the container can get in a CPU period." - type: "integer" - format: "int64" - CpuRealtimePeriod: - description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." - type: "integer" - format: "int64" - CpuRealtimeRuntime: - description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." - type: "integer" - format: "int64" - CpusetCpus: - description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" - type: "string" - CpusetMems: - description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). 
Only effective on NUMA systems." - type: "string" - Devices: - description: "A list of devices to add to the container." - type: "array" - items: - $ref: "#/definitions/DeviceMapping" - DeviceCgroupRules: - description: "a list of cgroup rules to apply to the container" - type: "array" - items: - type: "string" - example: "c 13:* rwm" - DiskQuota: - description: "Disk limit (in bytes)." - type: "integer" - format: "int64" - KernelMemory: - description: "Kernel memory limit in bytes." - type: "integer" - format: "int64" - MemoryReservation: - description: "Memory soft limit in bytes." - type: "integer" - format: "int64" - MemorySwap: - description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." - type: "integer" - format: "int64" - MemorySwappiness: - description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." - type: "integer" - format: "int64" - minimum: 0 - maximum: 100 - NanoCPUs: - description: "CPU quota in units of 10-9 CPUs." - type: "integer" - format: "int64" - OomKillDisable: - description: "Disable OOM Killer for the container." - type: "boolean" - PidsLimit: - description: "Tune a container's pids limit. Set -1 for unlimited." - type: "integer" - format: "int64" - Ulimits: - description: | - A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" - type: "array" - items: - type: "object" - properties: - Name: - description: "Name of ulimit" - type: "string" - Soft: - description: "Soft limit" - type: "integer" - Hard: - description: "Hard limit" - type: "integer" - # Applicable to Windows - CpuCount: - description: | - The number of usable CPUs (Windows only). - - On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - CpuPercent: - description: | - The usable percentage of the available CPUs (Windows only). - - On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - IOMaximumIOps: - description: "Maximum IOps for the container system drive (Windows only)" - type: "integer" - format: "int64" - IOMaximumBandwidth: - description: "Maximum IO in bytes per second for the container system drive (Windows only)" - type: "integer" - format: "int64" - - HealthConfig: - description: "A test to perform to check that the container is healthy." - type: "object" - properties: - Test: - description: | - The test to perform. Possible values are: - - - `[]` inherit healthcheck from image or parent image - - `["NONE"]` disable healthcheck - - `["CMD", args...]` exec arguments directly - - `["CMD-SHELL", command]` run command with system's default shell - type: "array" - items: - type: "string" - Interval: - description: "The time to wait between checks in nanoseconds. It should be 0 or not less than 1000000000(1s). 0 means inherit." - type: "integer" - Timeout: - description: "The time to wait before considering the check to have hung. It should be 0 or not less than 1000000000(1s). 0 means inherit." - type: "integer" - Retries: - description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." 
- type: "integer" - StartPeriod: - description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. 0 means inherit." - type: "integer" - - HostConfig: - description: "Container configuration that depends on the host we are running on" - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - # Applicable to all platforms - Binds: - type: "array" - description: | - A list of volume bindings for this container. Each volume binding is a string in one of these forms: - - - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - - `host-src:container-dest:ro` to make the bind-mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. - - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. - items: - type: "string" - ContainerIDFile: - type: "string" - description: "Path to a file where the container ID is written" - LogConfig: - type: "object" - description: "The logging configuration for this container" - properties: - Type: - type: "string" - enum: - - "json-file" - - "syslog" - - "journald" - - "gelf" - - "fluentd" - - "awslogs" - - "splunk" - - "etwlogs" - - "none" - Config: - type: "object" - additionalProperties: - type: "string" - NetworkMode: - type: "string" - description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken - as a custom network's name to which this container should connect to." - PortBindings: - type: "object" - description: "A map of exposed container ports and the host port they should map to." - additionalProperties: - type: "object" - properties: - HostIp: - type: "string" - description: "The host IP address" - HostPort: - type: "string" - description: "The host port number, as a string" - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - AutoRemove: - type: "boolean" - description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." - VolumeDriver: - type: "string" - description: "Driver that this container uses to mount volumes." - VolumesFrom: - type: "array" - description: "A list of volumes to inherit from another container, specified in the form `[:]`." - items: - type: "string" - Mounts: - description: "Specification for mounts to be added to the container." - type: "array" - items: - $ref: "#/definitions/Mount" - - # Applicable to UNIX platforms - CapAdd: - type: "array" - description: "A list of kernel capabilities to add to the container." - items: - type: "string" - CapDrop: - type: "array" - description: "A list of kernel capabilities to drop from the container." - items: - type: "string" - Dns: - type: "array" - description: "A list of DNS servers for the container to use." - items: - type: "string" - DnsOptions: - type: "array" - description: "A list of DNS options." - items: - type: "string" - DnsSearch: - type: "array" - description: "A list of DNS search domains." - items: - type: "string" - ExtraHosts: - type: "array" - description: | - A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. 
- items: - type: "string" - GroupAdd: - type: "array" - description: "A list of additional groups that the container process will run as." - items: - type: "string" - IpcMode: - type: "string" - description: "IPC namespace to use for the container." - Cgroup: - type: "string" - description: "Cgroup to use for the container." - Links: - type: "array" - description: "A list of links for the container in the form `container_name:alias`." - items: - type: "string" - OomScoreAdj: - type: "integer" - description: "An integer value containing the score given to the container in order to tune OOM killer preferences." - PidMode: - type: "string" - description: | - Set the PID (Process) Namespace mode for the container. It can be either: - - - `"container:"`: joins another container's PID namespace - - `"host"`: use the host's PID namespace inside the container - Privileged: - type: "boolean" - description: "Gives the container full access to the host." - PublishAllPorts: - type: "boolean" - description: "Allocates a random host port for all of a container's exposed ports." - ReadonlyRootfs: - type: "boolean" - description: "Mount the container's root filesystem as read only." - SecurityOpt: - type: "array" - description: "A list of string values to customize labels for MLS - systems, such as SELinux." - items: - type: "string" - StorageOpt: - type: "object" - description: | - Storage driver options for this container, in the form `{"size": "120G"}`. - additionalProperties: - type: "string" - Tmpfs: - type: "object" - description: | - A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. - additionalProperties: - type: "string" - UTSMode: - type: "string" - description: "UTS namespace to use for the container." - UsernsMode: - type: "string" - description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." - ShmSize: - type: "integer" - description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." - minimum: 0 - Sysctls: - type: "object" - description: | - A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` - additionalProperties: - type: "string" - Runtime: - type: "string" - description: "Runtime to use with this container." - # Applicable to Windows - ConsoleSize: - type: "array" - description: "Initial console size, as an `[height, width]` array. (Windows only)" - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - Isolation: - type: "string" - description: "Isolation technology of the container. (Windows only)" - enum: - - "default" - - "process" - - "hyperv" - - Config: - description: "Configuration for a container that is portable between hosts" - type: "object" - properties: - Hostname: - description: "The hostname to use for the container, as a valid RFC 1123 hostname." - type: "string" - Domainname: - description: "The domain name to use for the container." - type: "string" - User: - description: "The user that commands are run as inside the container." - type: "string" - AttachStdin: - description: "Whether to attach to `stdin`." - type: "boolean" - default: false - AttachStdout: - description: "Whether to attach to `stdout`." - type: "boolean" - default: true - AttachStderr: - description: "Whether to attach to `stderr`." 
- type: "boolean" - default: true - ExposedPorts: - description: | - An object mapping ports to an empty object in the form: - - `{"/": {}}` - type: "object" - additionalProperties: - type: "object" - enum: - - {} - default: {} - Tty: - description: "Attach standard streams to a TTY, including `stdin` if it is not closed." - type: "boolean" - default: false - OpenStdin: - description: "Open `stdin`" - type: "boolean" - default: false - StdinOnce: - description: "Close `stdin` after one attached client disconnects" - type: "boolean" - default: false - Env: - description: | - A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. - type: "array" - items: - type: "string" - Cmd: - description: "Command to run specified as a string or an array of strings." - type: - - "array" - - "string" - items: - type: "string" - Healthcheck: - $ref: "#/definitions/HealthConfig" - ArgsEscaped: - description: "Command is already escaped (Windows only)" - type: "boolean" - Image: - description: "The name of the image to use when creating the container" - type: "string" - Volumes: - description: "An object mapping mount point paths inside the container to empty objects." - type: "object" - properties: - additionalProperties: - type: "object" - enum: - - {} - default: {} - WorkingDir: - description: "The working directory for commands to run in." - type: "string" - Entrypoint: - description: | - The entry point for the container as a string or an array of strings. - - If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). - type: - - "array" - - "string" - items: - type: "string" - NetworkDisabled: - description: "Disable networking for the container." - type: "boolean" - MacAddress: - description: "MAC address of the container." - type: "string" - OnBuild: - description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." - type: "array" - items: - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - StopSignal: - description: "Signal to stop a container as a string or unsigned integer." - type: "string" - default: "SIGTERM" - StopTimeout: - description: "Timeout to stop a container in seconds." - type: "integer" - default: 10 - Shell: - description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." - type: "array" - items: - type: "string" - - NetworkConfig: - description: "TODO: check is correct" - type: "object" - properties: - Bridge: - type: "string" - Gateway: - type: "string" - Address: - type: "string" - IPPrefixLen: - type: "integer" - MacAddress: - type: "string" - PortMapping: - type: "string" - Ports: - type: "array" - items: - $ref: "#/definitions/Port" - - GraphDriverData: - description: "Information about a container's graph driver." 
- type: "object" - required: [Name, Data] - properties: - Name: - type: "string" - x-nullable: false - Data: - type: "object" - x-nullable: false - additionalProperties: - type: "string" - - Image: - type: "object" - required: - - Id - - Parent - - Comment - - Created - - Container - - DockerVersion - - Author - - Architecture - - Os - - Size - - VirtualSize - - GraphDriver - - RootFS - properties: - Id: - type: "string" - x-nullable: false - RepoTags: - type: "array" - items: - type: "string" - RepoDigests: - type: "array" - items: - type: "string" - Parent: - type: "string" - x-nullable: false - Comment: - type: "string" - x-nullable: false - Created: - type: "string" - x-nullable: false - Container: - type: "string" - x-nullable: false - ContainerConfig: - $ref: "#/definitions/Config" - DockerVersion: - type: "string" - x-nullable: false - Author: - type: "string" - x-nullable: false - Config: - $ref: "#/definitions/Config" - Architecture: - type: "string" - x-nullable: false - Os: - type: "string" - x-nullable: false - OsVersion: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - VirtualSize: - type: "integer" - format: "int64" - x-nullable: false - GraphDriver: - $ref: "#/definitions/GraphDriverData" - RootFS: - type: "object" - required: [Type] - properties: - Type: - type: "string" - x-nullable: false - Layers: - type: "array" - items: - type: "string" - BaseLayer: - type: "string" - - ImageSummary: - type: "object" - required: - - Id - - ParentId - - RepoTags - - RepoDigests - - Created - - Size - - SharedSize - - VirtualSize - - Labels - - Containers - properties: - Id: - type: "string" - x-nullable: false - ParentId: - type: "string" - x-nullable: false - RepoTags: - type: "array" - x-nullable: false - items: - type: "string" - RepoDigests: - type: "array" - x-nullable: false - items: - type: "string" - Created: - type: "integer" - x-nullable: false - Size: - type: "integer" - x-nullable: false - SharedSize: - type: "integer" - x-nullable: false - VirtualSize: - type: "integer" - x-nullable: false - Labels: - type: "object" - x-nullable: false - additionalProperties: - type: "string" - Containers: - x-nullable: false - type: "integer" - - AuthConfig: - type: "object" - properties: - username: - type: "string" - password: - type: "string" - email: - type: "string" - serveraddress: - type: "string" - example: - username: "hannibal" - password: "xxxx" - serveraddress: "https://index.docker.io/v1/" - - ProcessConfig: - type: "object" - properties: - privileged: - type: "boolean" - user: - type: "string" - tty: - type: "boolean" - entrypoint: - type: "string" - arguments: - type: "array" - items: - type: "string" - - Volume: - type: "object" - required: [Name, Driver, Mountpoint, Labels, Scope, Options] - properties: - Name: - type: "string" - description: "Name of the volume." - x-nullable: false - Driver: - type: "string" - description: "Name of the volume driver used by the volume." - x-nullable: false - Mountpoint: - type: "string" - description: "Mount path of the volume on the host." - x-nullable: false - Status: - type: "object" - description: | - Low-level details about the volume, provided by the volume driver. - Details are returned as a map with key/value pairs: - `{"key":"value","key2":"value2"}`. - - The `Status` field is optional, and is omitted if the volume driver - does not support this feature. - additionalProperties: - type: "object" - Labels: - type: "object" - description: "User-defined key/value metadata." 
- x-nullable: false - additionalProperties: - type: "string" - Scope: - type: "string" - description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." - default: "local" - x-nullable: false - enum: ["local", "global"] - Options: - type: "object" - description: "The driver specific options used when creating the volume." - additionalProperties: - type: "string" - UsageData: - type: "object" - required: [Size, RefCount] - properties: - Size: - type: "integer" - description: "The disk space used by the volume (local driver only)" - default: -1 - x-nullable: false - RefCount: - type: "integer" - default: -1 - description: "The number of containers referencing this volume." - x-nullable: false - - example: - Name: "tardis" - Driver: "custom" - Mountpoint: "/var/lib/docker/volumes/tardis" - Status: - hello: "world" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - - Network: - type: "object" - properties: - Name: - type: "string" - Id: - type: "string" - Created: - type: "string" - format: "dateTime" - Scope: - type: "string" - Driver: - type: "string" - EnableIPv6: - type: "boolean" - IPAM: - $ref: "#/definitions/IPAM" - Internal: - type: "boolean" - Attachable: - type: "boolean" - Ingress: - type: "boolean" - Containers: - type: "object" - additionalProperties: - $ref: "#/definitions/NetworkContainer" - Options: - type: "object" - additionalProperties: - type: "string" - Labels: - type: "object" - additionalProperties: - type: "string" - example: - Name: "net01" - Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" - Created: "2016-10-19T04:33:30.360899459Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - IPAM: - Driver: "default" - Config: - - Subnet: "172.19.0.0/16" - Gateway: "172.19.0.1" - Options: - foo: "bar" - Internal: false - Attachable: false - Ingress: false - Containers: - 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: - Name: "test" - EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" - MacAddress: "02:42:ac:13:00:02" - IPv4Address: "172.19.0.2/16" - IPv6Address: "" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - IPAM: - type: "object" - properties: - Driver: - description: "Name of the IPAM driver to use." - type: "string" - default: "default" - Config: - description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`" - type: "array" - items: - type: "object" - additionalProperties: - type: "string" - Options: - description: "Driver-specific options, specified as a map." 
- type: "array" - items: - type: "object" - additionalProperties: - type: "string" - NetworkContainer: - type: "object" - properties: - EndpointID: - type: "string" - MacAddress: - type: "string" - IPv4Address: - type: "string" - IPv6Address: - type: "string" - - BuildInfo: - type: "object" - properties: - id: - type: "string" - stream: - type: "string" - error: - type: "string" - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - CreateImageInfo: - type: "object" - properties: - error: - type: "string" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - PushImageInfo: - type: "object" - properties: - error: - type: "string" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - ErrorDetail: - type: "object" - properties: - code: - type: "integer" - message: - type: "string" - ProgressDetail: - type: "object" - properties: - code: - type: "integer" - message: - type: "integer" - - ErrorResponse: - description: "Represents an error." - type: "object" - required: ["message"] - properties: - message: - description: "The error message." - type: "string" - x-nullable: false - example: - message: "Something went wrong." - - IdResponse: - description: "Response to an API call that returns just an Id" - type: "object" - required: ["Id"] - properties: - Id: - description: "The id of the newly created object." - type: "string" - x-nullable: false - - EndpointSettings: - description: "Configuration for a network endpoint." - type: "object" - properties: - IPAMConfig: - description: "IPAM configurations for the endpoint" - type: "object" - properties: - IPv4Address: - type: "string" - IPv6Address: - type: "string" - LinkLocalIPs: - type: "array" - items: - type: "string" - Links: - type: "array" - items: - type: "string" - Aliases: - type: "array" - items: - type: "string" - NetworkID: - type: "string" - EndpointID: - type: "string" - Gateway: - type: "string" - IPAddress: - type: "string" - IPPrefixLen: - type: "integer" - IPv6Gateway: - type: "string" - GlobalIPv6Address: - type: "string" - GlobalIPv6PrefixLen: - type: "integer" - format: "int64" - MacAddress: - type: "string" - - PluginMount: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Source, Destination, Type, Options] - properties: - Name: - type: "string" - x-nullable: false - Description: - type: "string" - x-nullable: false - Settable: - type: "array" - items: - type: "string" - Source: - type: "string" - Destination: - type: "string" - x-nullable: false - Type: - type: "string" - x-nullable: false - Options: - type: "array" - items: - type: "string" - - PluginDevice: - type: "object" - required: [Name, Description, Settable, Path] - x-nullable: false - properties: - Name: - type: "string" - x-nullable: false - Description: - type: "string" - x-nullable: false - Settable: - type: "array" - items: - type: "string" - Path: - type: "string" - - PluginEnv: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - Description: - x-nullable: false - type: "string" - Settable: - type: "array" - items: - type: "string" - Value: - type: "string" - - PluginInterfaceType: - type: "object" - x-nullable: false - required: [Prefix, Capability, Version] - properties: - Prefix: - type: "string" - 
x-nullable: false - Capability: - type: "string" - x-nullable: false - Version: - type: "string" - x-nullable: false - - Plugin: - description: "A plugin for the Engine API" - type: "object" - required: [Settings, Enabled, Config, Name] - properties: - Id: - type: "string" - Name: - type: "string" - x-nullable: false - Enabled: - description: "True when the plugin is running. False when the plugin is not running, only installed." - type: "boolean" - x-nullable: false - Settings: - description: "Settings that can be modified by users." - type: "object" - x-nullable: false - required: [Args, Devices, Env, Mounts] - properties: - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - type: "string" - Args: - type: "array" - items: - type: "string" - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PluginReference: - description: "plugin remote reference used to push/pull the plugin" - type: "string" - x-nullable: false - Config: - description: "The config of a plugin." - type: "object" - x-nullable: false - required: - - Description - - Documentation - - Interface - - Entrypoint - - WorkDir - - Network - - Linux - - PidHost - - PropagatedMount - - IpcHost - - Mounts - - Env - - Args - properties: - DockerVersion: - description: "Docker Version used to create the plugin" - type: "string" - x-nullable: false - Description: - type: "string" - x-nullable: false - Documentation: - type: "string" - x-nullable: false - Interface: - description: "The interface between Docker and the plugin" - x-nullable: false - type: "object" - required: [Types, Socket] - properties: - Types: - type: "array" - items: - $ref: "#/definitions/PluginInterfaceType" - Socket: - type: "string" - x-nullable: false - Entrypoint: - type: "array" - items: - type: "string" - WorkDir: - type: "string" - x-nullable: false - User: - type: "object" - x-nullable: false - properties: - UID: - type: "integer" - format: "uint32" - GID: - type: "integer" - format: "uint32" - Network: - type: "object" - x-nullable: false - required: [Type] - properties: - Type: - x-nullable: false - type: "string" - Linux: - type: "object" - x-nullable: false - required: [Capabilities, AllowAllDevices, Devices] - properties: - Capabilities: - type: "array" - items: - type: "string" - AllowAllDevices: - type: "boolean" - x-nullable: false - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PropagatedMount: - type: "string" - x-nullable: false - IpcHost: - type: "boolean" - x-nullable: false - PidHost: - type: "boolean" - x-nullable: false - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - $ref: "#/definitions/PluginEnv" - Args: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - Description: - x-nullable: false - type: "string" - Settable: - type: "array" - items: - type: "string" - Value: - type: "array" - items: - type: "string" - rootfs: - type: "object" - properties: - type: - type: "string" - diff_ids: - type: "array" - items: - type: "string" - example: - Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" - Name: "tiborvass/sample-volume-plugin" - Tag: "latest" - Active: true - Settings: - Env: - - "DEBUG=0" - Args: null - Devices: null - Config: - Description: "A sample volume plugin for Docker" - Documentation: "https://docs.docker.com/engine/extend/plugins/" - Interface: - Types: - - 
"docker.volumedriver/1.0" - Socket: "plugins.sock" - Entrypoint: - - "/usr/bin/sample-volume-plugin" - - "/data" - WorkDir: "" - User: {} - Network: - Type: "" - Linux: - Capabilities: null - AllowAllDevices: false - Devices: null - Mounts: null - PropagatedMount: "/data" - Env: - - Name: "DEBUG" - Description: "If set, prints debug messages" - Settable: null - Value: "0" - Args: - Name: "args" - Description: "command line arguments" - Settable: null - Value: [] - - ObjectVersion: - description: | - The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. - The client must send the version number along with the modified specification when updating these objects. - This approach ensures safe concurrency and determinism in that the change on the object - may not be applied if the version number has changed from the last read. In other words, - if two update requests specify the same base version, only one of the requests can succeed. - As a result, two separate update requests that happen at the same time will not - unintentially overwrite each other. - type: "object" - properties: - Index: - type: "integer" - format: "int64" - - NodeSpec: - type: "object" - properties: - Name: - description: "Name for the node." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Role: - description: "Role of the node." - type: "string" - enum: - - "worker" - - "manager" - Availability: - description: "Availability of the node." - type: "string" - enum: - - "active" - - "pause" - - "drain" - example: - Availability: "active" - Name: "node-name" - Role: "manager" - Labels: - foo: "bar" - Node: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/NodeSpec" - Description: - type: "object" - properties: - Hostname: - type: "string" - Platform: - type: "object" - properties: - Architecture: - type: "string" - OS: - type: "string" - Resources: - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - MemoryBytes: - type: "integer" - format: "int64" - Engine: - type: "object" - properties: - EngineVersion: - type: "string" - Labels: - type: "object" - additionalProperties: - type: "string" - Plugins: - type: "array" - items: - type: "object" - properties: - Type: - type: "string" - Name: - type: "string" - example: - ID: "24ifsmvkjbyhk" - Version: - Index: 8 - CreatedAt: "2016-06-07T20:31:11.853781916Z" - UpdatedAt: "2016-06-07T20:31:11.999868824Z" - Spec: - Name: "my-node" - Role: "manager" - Availability: "active" - Labels: - foo: "bar" - Description: - Hostname: "bf3067039e47" - Platform: - Architecture: "x86_64" - OS: "linux" - Resources: - NanoCPUs: 4000000000 - MemoryBytes: 8272408576 - Engine: - EngineVersion: "17.04.0" - Labels: - foo: "bar" - Plugins: - - Type: "Volume" - Name: "local" - - Type: "Network" - Name: "bridge" - - Type: "Network" - Name: "null" - - Type: "Network" - Name: "overlay" - Status: - State: "ready" - Addr: "172.17.0.2" - ManagerStatus: - Leader: true - Reachability: "reachable" - Addr: "172.17.0.2:2377" - SwarmSpec: - description: "User modifiable swarm configuration." - type: "object" - properties: - Name: - description: "Name of the swarm." - type: "string" - Labels: - description: "User-defined key/value metadata." 
- type: "object" - additionalProperties: - type: "string" - Orchestration: - description: "Orchestration configuration." - type: "object" - properties: - TaskHistoryRetentionLimit: - description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." - type: "integer" - format: "int64" - Raft: - description: "Raft configuration." - type: "object" - properties: - SnapshotInterval: - description: "The number of log entries between snapshots." - type: "integer" - format: "int64" - KeepOldSnapshots: - description: "The number of snapshots to keep beyond the current snapshot." - type: "integer" - format: "int64" - LogEntriesForSlowFollowers: - description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." - type: "integer" - format: "int64" - ElectionTick: - description: | - The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. - - A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. - type: "integer" - HeartbeatTick: - description: | - The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. - - A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. - type: "integer" - Dispatcher: - description: "Dispatcher configuration." - type: "object" - properties: - HeartbeatPeriod: - description: "The delay for an agent to send a heartbeat to the dispatcher." - type: "integer" - format: "int64" - CAConfig: - description: "CA configuration." - type: "object" - properties: - NodeCertExpiry: - description: "The duration node certificates are issued for." - type: "integer" - format: "int64" - ExternalCAs: - description: "Configuration for forwarding signing requests to an external certificate authority." - type: "array" - items: - type: "object" - properties: - Protocol: - description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." - type: "string" - enum: - - "cfssl" - default: "cfssl" - URL: - description: "URL where certificate signing requests should be sent." - type: "string" - Options: - description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." - type: "object" - additionalProperties: - type: "string" - EncryptionConfig: - description: "Parameters related to encryption-at-rest." - type: "object" - properties: - AutoLockManagers: - description: "If set, generate a key and use it to lock data stored on the managers." - type: "boolean" - TaskDefaults: - description: "Defaults for creating tasks in this cluster." - type: "object" - properties: - LogDriver: - description: | - The log driver to use for tasks created in the orchestrator if unspecified by a service. - - Updating this value will only have an affect on new tasks. Old tasks will continue use their previously configured log driver until recreated. 
- type: "object" - properties: - Name: - type: "string" - Options: - type: "object" - additionalProperties: - type: "string" - example: - Name: "default" - Orchestration: - TaskHistoryRetentionLimit: 10 - Raft: - SnapshotInterval: 10000 - LogEntriesForSlowFollowers: 500 - HeartbeatTick: 1 - ElectionTick: 3 - Dispatcher: - HeartbeatPeriod: 5000000000 - CAConfig: - NodeCertExpiry: 7776000000000000 - JoinTokens: - Worker: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" - Manager: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - EncryptionConfig: - AutoLockManagers: false - # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but - # without `JoinTokens`. - ClusterInfo: - type: "object" - properties: - ID: - description: "The ID of the swarm." - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/SwarmSpec" - TaskSpec: - description: "User modifiable task configuration." - type: "object" - properties: - ContainerSpec: - type: "object" - properties: - Image: - description: "The image name to use for the container." - type: "string" - Labels: - description: "User-defined key/value data." - type: "object" - additionalProperties: - type: "string" - Command: - description: "The command to be run in the image." - type: "array" - items: - type: "string" - Args: - description: "Arguments to the command." - type: "array" - items: - type: "string" - Hostname: - description: "The hostname to use for the container, as a valid RFC 1123 hostname." - type: "string" - Env: - description: "A list of environment variables in the form `VAR=value`." - type: "array" - items: - type: "string" - Dir: - description: "The working directory for commands to run in." - type: "string" - User: - description: "The user inside the container." - type: "string" - Groups: - type: "array" - description: "A list of additional groups that the container process will run as." - items: - type: "string" - TTY: - description: "Whether a pseudo-TTY should be allocated." - type: "boolean" - OpenStdin: - description: "Open `stdin`" - type: "boolean" - ReadOnly: - description: "Mount the container's root filesystem as read only." - type: "boolean" - Mounts: - description: "Specification for mounts to be added to containers created as part of the service." - type: "array" - items: - $ref: "#/definitions/Mount" - StopSignal: - description: "Signal to stop the container." - type: "string" - StopGracePeriod: - description: "Amount of time to wait for the container to terminate before forcefully killing it." - type: "integer" - format: "int64" - HealthCheck: - $ref: "#/definitions/HealthConfig" - Hosts: - type: "array" - description: | - A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. - The format of extra hosts on swarmkit is specified in: - http://man7.org/linux/man-pages/man5/hosts.5.html - IP_address canonical_hostname [aliases...] - items: - type: "string" - DNSConfig: - description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." - type: "object" - properties: - Nameservers: - description: "The IP addresses of the name servers." - type: "array" - items: - type: "string" - Search: - description: "A search list for host-name lookup." 
- type: "array" - items: - type: "string" - Options: - description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." - type: "array" - items: - type: "string" - Secrets: - description: "Secrets contains references to zero or more secrets that will be exposed to the service." - type: "array" - items: - type: "object" - properties: - File: - description: "File represents a specific target that is backed by a file." - type: "object" - properties: - Name: - description: "Name represents the final filename in the filesystem." - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - SecretID: - description: "SecretID represents the ID of the specific secret that we're referencing." - type: "string" - SecretName: - description: | - SecretName is the name of the secret that this references, but this is just provided for - lookup/display purposes. The secret in the reference will be identified by its ID. - type: "string" - - Resources: - description: "Resource requirements which apply to each individual container created as part of the service." - type: "object" - properties: - Limits: - description: "Define resources limits." - type: "object" - properties: - NanoCPUs: - description: "CPU limit in units of 10-9 CPU shares." - type: "integer" - format: "int64" - MemoryBytes: - description: "Memory limit in Bytes." - type: "integer" - format: "int64" - Reservation: - description: "Define resources reservation." - properties: - NanoCPUs: - description: "CPU reservation in units of 10-9 CPU shares." - type: "integer" - format: "int64" - MemoryBytes: - description: "Memory reservation in Bytes." - type: "integer" - format: "int64" - RestartPolicy: - description: "Specification for the restart policy which applies to containers created as part of this service." - type: "object" - properties: - Condition: - description: "Condition for restart." - type: "string" - enum: - - "none" - - "on-failure" - - "any" - Delay: - description: "Delay between restart attempts." - type: "integer" - format: "int64" - MaxAttempts: - description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." - type: "integer" - format: "int64" - default: 0 - Window: - description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." - type: "integer" - format: "int64" - default: 0 - Placement: - type: "object" - properties: - Constraints: - description: "An array of constraints." - type: "array" - items: - type: "string" - Preferences: - description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence." - type: "array" - items: - type: "object" - properties: - Spread: - type: "object" - properties: - SpreadDescriptor: - description: "label descriptor, such as engine.labels.az" - type: "string" - ForceUpdate: - description: "A counter that triggers an update even if no relevant parameters have been changed." - type: "integer" - Networks: - type: "array" - items: - type: "object" - properties: - Target: - type: "string" - Aliases: - type: "array" - items: - type: "string" - LogDriver: - description: "Specifies the log driver to use for tasks created from this spec. 
If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified." - type: "object" - properties: - Name: - type: "string" - Options: - type: "object" - additionalProperties: - type: "string" - TaskState: - type: "string" - enum: - - "new" - - "allocated" - - "pending" - - "assigned" - - "accepted" - - "preparing" - - "ready" - - "starting" - - "running" - - "complete" - - "shutdown" - - "failed" - - "rejected" - Task: - type: "object" - properties: - ID: - description: "The ID of the task." - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Name: - description: "Name of the task." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Spec: - $ref: "#/definitions/TaskSpec" - ServiceID: - description: "The ID of the service this task is part of." - type: "string" - Slot: - type: "integer" - NodeID: - description: "The ID of the node that this task is on." - type: "string" - Status: - type: "object" - properties: - Timestamp: - type: "string" - format: "dateTime" - State: - $ref: "#/definitions/TaskState" - Message: - type: "string" - Err: - type: "string" - ContainerStatus: - type: "object" - properties: - ContainerID: - type: "string" - PID: - type: "integer" - ExitCode: - type: "integer" - DesiredState: - $ref: "#/definitions/TaskState" - example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - ServiceSpec: - description: "User modifiable configuration for a service." - properties: - Name: - description: "Name of the service." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - TaskTemplate: - $ref: "#/definitions/TaskSpec" - Mode: - description: "Scheduling mode for the service." - type: "object" - properties: - Replicated: - type: "object" - properties: - Replicas: - type: "integer" - format: "int64" - Global: - type: "object" - UpdateConfig: - description: "Specification for the update strategy of the service." - type: "object" - properties: - Parallelism: - description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." 
- type: "integer" - format: "int64" - Delay: - description: "Amount of time between updates, in nanoseconds." - type: "integer" - format: "int64" - FailureAction: - description: "Action to take if an updated task fails to run, or stops running during the update." - type: "string" - enum: - - "continue" - - "pause" - - "rollback" - Monitor: - description: "Amount of time to monitor each updated task for failures, in nanoseconds." - type: "integer" - format: "int64" - MaxFailureRatio: - description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." - type: "number" - default: 0 - Order: - description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." - type: "string" - enum: - - "stop-first" - - "start-first" - RollbackConfig: - description: "Specification for the rollback strategy of the service." - type: "object" - properties: - Parallelism: - description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)." - type: "integer" - format: "int64" - Delay: - description: "Amount of time between rollback iterations, in nanoseconds." - type: "integer" - format: "int64" - FailureAction: - description: "Action to take if an rolled back task fails to run, or stops running during the rollback." - type: "string" - enum: - - "continue" - - "pause" - Monitor: - description: "Amount of time to monitor each rolled back task for failures, in nanoseconds." - type: "integer" - format: "int64" - MaxFailureRatio: - description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1." - type: "number" - default: 0 - Order: - description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down." - type: "string" - enum: - - "stop-first" - - "start-first" - Networks: - description: "Array of network names or IDs to attach the service to." - type: "array" - items: - type: "object" - properties: - Target: - type: "string" - Aliases: - type: "array" - items: - type: "string" - EndpointSpec: - $ref: "#/definitions/EndpointSpec" - EndpointPortConfig: - type: "object" - properties: - Name: - type: "string" - Protocol: - type: "string" - enum: - - "tcp" - - "udp" - TargetPort: - description: "The port inside the container." - type: "integer" - PublishedPort: - description: "The port on the swarm hosts." - type: "integer" - EndpointSpec: - description: "Properties that can be configured to access and load balance a service." - type: "object" - properties: - Mode: - description: "The mode of resolution to use for internal load balancing - between tasks." - type: "string" - enum: - - "vip" - - "dnsrr" - default: "vip" - Ports: - description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." 
- type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - Service: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ServiceSpec" - Endpoint: - type: "object" - properties: - Spec: - $ref: "#/definitions/EndpointSpec" - Ports: - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - VirtualIPs: - type: "array" - items: - type: "object" - properties: - NetworkID: - type: "string" - Addr: - type: "string" - UpdateStatus: - description: "The status of a service update." - type: "object" - properties: - State: - type: "string" - enum: - - "updating" - - "paused" - - "completed" - StartedAt: - type: "string" - format: "dateTime" - CompletedAt: - type: "string" - format: "dateTime" - Message: - type: "string" - example: - ID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Version: - Index: 19 - CreatedAt: "2016-06-07T21:05:51.880065305Z" - UpdatedAt: "2016-06-07T21:07:29.962229872Z" - Spec: - Name: "hopeful_cori" - TaskTemplate: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Endpoint: - Spec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - VirtualIPs: - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.2/16" - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.3/16" - ImageDeleteResponseItem: - type: "object" - properties: - Untagged: - description: "The image ID of an image that was untagged" - type: "string" - Deleted: - description: "The image ID of an image that was deleted" - type: "string" - ServiceUpdateResponse: - type: "object" - properties: - Warnings: - description: "Optional warning messages" - type: "array" - items: - type: "string" - example: - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - ContainerSummary: - type: "array" - items: - type: "object" - properties: - Id: - description: "The ID of this container" - type: "string" - x-go-name: "ID" - Names: - description: "The names that this container has been given" - type: "array" - items: - type: "string" - Image: - description: "The name of the image used when creating this container" - type: "string" - ImageID: - description: "The ID of the image that this container was created from" - type: "string" - Command: - description: "Command to run when starting the container" - type: "string" - Created: - description: "When the container was created" - type: "integer" - format: "int64" - Ports: - description: "The ports exposed by this container" - type: "array" - items: - $ref: "#/definitions/Port" - SizeRw: - description: "The size of files that have been created or changed by this container" - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container" - type: "integer" - format: "int64" - 
Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - State: - description: "The state of this container (e.g. `Exited`)" - type: "string" - Status: - description: "Additional human-readable status of this container (e.g. `Exit 0`)" - type: "string" - HostConfig: - type: "object" - properties: - NetworkMode: - type: "string" - NetworkSettings: - description: "A summary of the container's network settings" - type: "object" - properties: - Networks: - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - Mounts: - type: "array" - items: - $ref: "#/definitions/Mount" - SecretSpec: - type: "object" - properties: - Name: - description: "User-defined name of the secret." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Data: - description: "Base64-url-safe-encoded secret data" - type: "array" - items: - type: "string" - Secret: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ServiceSpec" -paths: - /containers/json: - get: - summary: "List containers" - operationId: "ContainerList" - produces: - - "application/json" - parameters: - - name: "all" - in: "query" - description: "Return all containers. By default, only running containers are shown" - type: "boolean" - default: false - - name: "limit" - in: "query" - description: "Return this number of most recently created containers, including non-running ones." - type: "integer" - - name: "size" - in: "query" - description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. 
Available filters: - - - `ancestor`=(`[:]`, ``, or ``) - - `before`=(`` or ``) - - `expose`=(`[/]`|`/[]`) - - `exited=` containers with exit code of `` - - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - - `id=` a container's ID - - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - - `is-task=`(`true`|`false`) - - `label=key` or `label="key=value"` of a container label - - `name=` a container's name - - `network`=(`` or ``) - - `publish`=(`[/]`|`/[]`) - - `since`=(`` or ``) - - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - - `volume`=(`` or ``) - type: "string" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ContainerSummary" - examples: - application/json: - - Id: "8dfafdbc3a40" - Names: - - "/boring_feynman" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 1" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: - - PrivatePort: 2222 - PublicPort: 3333 - Type: "tcp" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:02" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - - Id: "9cd87474be90" - Names: - - "/coolName" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 222222" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.8" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:08" - Mounts: [] - - Id: "3176a2479c92" - Names: - - "/sleepy_dog" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 3333333333333333" - Created: 1367854154 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.6" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:06" - Mounts: [] - - Id: "4cb07b47f9fb" - Names: - - "/running_cat" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 444444444444444444444444444444444" - Created: 1367854152 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - 
NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.5" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:05" - Mounts: [] - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/create: - post: - summary: "Create a container" - operationId: "ContainerCreate" - consumes: - - "application/json" - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "name" - in: "query" - description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." - type: "string" - pattern: "/?[a-zA-Z0-9_-]+" - - name: "body" - in: "body" - description: "Container to create" - schema: - allOf: - - $ref: "#/definitions/Config" - - type: "object" - properties: - HostConfig: - $ref: "#/definitions/HostConfig" - NetworkingConfig: - description: "This container's networking configuration." - type: "object" - properties: - EndpointsConfig: - description: "A mapping of network name to endpoint configuration for that network." - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - example: - Hostname: "" - Domainname: "" - User: "" - AttachStdin: false - AttachStdout: true - AttachStderr: true - Tty: false - OpenStdin: false - StdinOnce: false - Env: - - "FOO=bar" - - "BAZ=quux" - Cmd: - - "date" - Entrypoint: "" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - Volumes: - /volumes/data: {} - WorkingDir: "" - NetworkDisabled: false - MacAddress: "12:34:56:78:9a:bc" - ExposedPorts: - 22/tcp: {} - StopSignal: "SIGTERM" - StopTimeout: 10 - HostConfig: - Binds: - - "/tmp:/tmp" - Links: - - "redis3:redis" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - KernelMemory: 0 - NanoCPUs: 500000 - CpuPercent: 80 - CpuShares: 512 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpuQuota: 50000 - CpusetCpus: "0,1" - CpusetMems: "0,1" - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 300 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceWriteIOps: - - {} - MemorySwappiness: 60 - OomKillDisable: false - OomScoreAdj: 500 - PidMode: "" - PidsLimit: -1 - PortBindings: - 22/tcp: - - HostPort: "11022" - PublishAllPorts: false - Privileged: false - ReadonlyRootfs: false - Dns: - - "8.8.8.8" - DnsOptions: - - "" - DnsSearch: - - "" - VolumesFrom: - - "parent" - - "other:ro" - CapAdd: - - "NET_ADMIN" - CapDrop: - - "MKNOD" - GroupAdd: - - "newgroup" - RestartPolicy: - Name: "" - MaximumRetryCount: 0 - AutoRemove: true - NetworkMode: "bridge" - Devices: [] - Ulimits: - - {} - LogConfig: - Type: "json-file" - Config: {} - SecurityOpt: [] - StorageOpt: {} - CgroupParent: "" - VolumeDriver: "" - ShmSize: 67108864 - NetworkingConfig: - EndpointsConfig: - isolated_nw: - IPAMConfig: - IPv4Address: "172.20.30.33" - IPv6Address: "2001:db8:abcd::3033" - LinkLocalIPs: - - "169.254.34.68" - - "fe80::3468" - Links: - - "container_1" - - "container_2" - Aliases: - - "server_x" - - "server_y" - - required: true - responses: - 201: - description: "Container created successfully" - schema: - type: 
"object" - required: [Id, Warnings] - properties: - Id: - description: "The ID of the created container" - type: "string" - x-nullable: false - Warnings: - description: "Warnings encountered when creating the container" - type: "array" - x-nullable: false - items: - type: "string" - examples: - application/json: - Id: "e90e34656806" - Warnings: [] - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 406: - description: "impossible to attach" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/{id}/json: - get: - summary: "Inspect a container" - description: "Return low-level information about a container." - operationId: "ContainerInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - properties: - Id: - description: "The ID of the container" - type: "string" - Created: - description: "The time the container was created" - type: "string" - Path: - description: "The path to the command being run" - type: "string" - Args: - description: "The arguments to the command being run" - type: "array" - items: - type: "string" - State: - description: "The state of the container." - type: "object" - properties: - Status: - description: "The status of the container. For example, `running` or `exited`." - type: "string" - Running: - description: "Whether this container is running." - type: "boolean" - Paused: - description: "Whether this container is paused." - type: "boolean" - Restarting: - description: "Whether this container is restarting." - type: "boolean" - OOMKilled: - description: "Whether this container has been killed because it ran out of memory." - type: "boolean" - Dead: - type: "boolean" - Pid: - description: "The process ID of this container" - type: "integer" - ExitCode: - description: "The last exit code of this container" - type: "integer" - Error: - type: "string" - StartedAt: - description: "The time when this container was last started." - type: "string" - FinishedAt: - description: "The time when this container last exited." - type: "string" - Image: - description: "The container's image" - type: "string" - ResolvConfPath: - type: "string" - HostnamePath: - type: "string" - HostsPath: - type: "string" - LogPath: - type: "string" - Node: - description: "TODO" - type: "object" - Name: - type: "string" - RestartCount: - type: "integer" - Driver: - type: "string" - MountLabel: - type: "string" - ProcessLabel: - type: "string" - AppArmorProfile: - type: "string" - ExecIDs: - type: "string" - HostConfig: - $ref: "#/definitions/HostConfig" - GraphDriver: - $ref: "#/definitions/GraphDriverData" - SizeRw: - description: "The size of files that have been created or changed by this container." - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container." 
- type: "integer" - format: "int64" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - Config: - $ref: "#/definitions/Config" - NetworkSettings: - $ref: "#/definitions/NetworkConfig" - examples: - application/json: - AppArmorProfile: "" - Args: - - "-c" - - "exit 9" - Config: - AttachStderr: true - AttachStdin: false - AttachStdout: true - Cmd: - - "/bin/sh" - - "-c" - - "exit 9" - Domainname: "" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Hostname: "ba033ac44011" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - MacAddress: "" - NetworkDisabled: false - OpenStdin: false - StdinOnce: false - Tty: false - User: "" - Volumes: - /volumes/data: {} - WorkingDir: "" - StopSignal: "SIGTERM" - StopTimeout: 10 - Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" - HostConfig: - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 0 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteIOps: - - {} - ContainerIDFile: "" - CpusetCpus: "" - CpusetMems: "" - CpuPercent: 80 - CpuShares: 0 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - Devices: [] - IpcMode: "" - LxcConf: [] - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - KernelMemory: 0 - OomKillDisable: false - OomScoreAdj: 500 - NetworkMode: "bridge" - PidMode: "" - PortBindings: {} - Privileged: false - ReadonlyRootfs: false - PublishAllPorts: false - RestartPolicy: - MaximumRetryCount: 2 - Name: "on-failure" - LogConfig: - Type: "json-file" - Sysctls: - net.ipv4.ip_forward: "1" - Ulimits: - - {} - VolumeDriver: "" - ShmSize: 67108864 - HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" - HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" - LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" - Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" - Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" - MountLabel: "" - Name: "/boring_euclid" - NetworkSettings: - Bridge: "" - SandboxID: "" - HairpinMode: false - LinkLocalIPv6Address: "" - LinkLocalIPv6PrefixLen: 0 - SandboxKey: "" - SecondaryIPAddresses: null - SecondaryIPv6Addresses: null - EndpointID: "" - Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - IPAddress: "" - IPPrefixLen: 0 - IPv6Gateway: "" - MacAddress: "" - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Path: "/bin/sh" - ProcessLabel: "" - ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" - RestartCount: 1 - State: - Error: "" - ExitCode: 9 - FinishedAt: "2015-01-06T15:47:32.080254511Z" - OOMKilled: false - Dead: false - Paused: false - Pid: 0 - Restarting: false - Running: true - StartedAt: "2015-01-06T15:47:32.072697474Z" - Status: "running" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" 
- Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "size" - in: "query" - type: "boolean" - default: false - description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" - tags: ["Container"] - /containers/{id}/top: - get: - summary: "List processes running inside a container" - description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." - operationId: "ContainerTop" - responses: - 200: - description: "no error" - schema: - type: "object" - properties: - Titles: - description: "The ps column titles" - type: "array" - items: - type: "string" - Processes: - description: "Each process running in the container, where each is process is an array of values corresponding to the titles" - type: "array" - items: - type: "array" - items: - type: "string" - examples: - application/json: - Titles: - - "UID" - - "PID" - - "PPID" - - "C" - - "STIME" - - "TTY" - - "TIME" - - "CMD" - Processes: - - - - "root" - - "13642" - - "882" - - "0" - - "17:03" - - "pts/0" - - "00:00:00" - - "/bin/bash" - - - - "root" - - "13735" - - "13642" - - "0" - - "17:06" - - "pts/0" - - "00:00:00" - - "sleep 10" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "ps_args" - in: "query" - description: "The arguments to pass to `ps`. For example, `aux`" - type: "string" - default: "-ef" - tags: ["Container"] - /containers/{id}/logs: - get: - summary: "Get container logs" - description: | - Get `stdout` and `stderr` logs from a container. - - Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. - operationId: "ContainerLogs" - responses: - 101: - description: "logs returned as a stream" - schema: - type: "string" - format: "binary" - 200: - description: "logs returned as a string in response body" - schema: - type: "string" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "follow" - in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). 
- type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." - type: "string" - default: "all" - tags: ["Container"] - /containers/{id}/changes: - get: - summary: "Get changes on a container’s filesystem" - description: | - Returns which files in a container's filesystem have been added, deleted, - or modified. The `Kind` of modification can be one of: - - - `0`: Modified - - `1`: Added - - `2`: Deleted - operationId: "ContainerChanges" - produces: ["application/json"] - responses: - 200: - description: "The list of changes" - schema: - type: "array" - items: - type: "object" - x-go-name: "ContainerChangeResponseItem" - required: [Path, Kind] - properties: - Path: - description: "Path to file that has changed" - type: "string" - x-nullable: false - Kind: - description: "Kind of change" - type: "integer" - format: "uint8" - enum: [0, 1, 2] - x-nullable: false - examples: - application/json: - - Path: "/dev" - Kind: 0 - - Path: "/dev/kmsg" - Kind: 1 - - Path: "/test" - Kind: 1 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/export: - get: - summary: "Export a container" - description: "Export the contents of a container as a tarball." - operationId: "ContainerExport" - produces: - - "application/octet-stream" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/stats: - get: - summary: "Get container stats based on resource usage" - description: | - This endpoint returns a live stream of a container’s resource usage - statistics. - - The `precpu_stats` is the CPU statistic of last read, which is used - for calculating the CPU usage percentage. It is not the same as the - `cpu_stats` field. - - If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is - nil then for compatibility with older daemons the length of the - corresponding `cpu_usage.percpu_usage` array should be used. 
- operationId: "ContainerStats" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - type: "object" - examples: - application/json: - read: "2015-01-08T22:57:31.547920715Z" - pids_stats: - current: 3 - networks: - eth0: - rx_bytes: 5338 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 36 - tx_bytes: 648 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 8 - eth5: - rx_bytes: 4641 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 26 - tx_bytes: 690 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 9 - memory_stats: - stats: - total_pgmajfault: 0 - cache: 0 - mapped_file: 0 - total_inactive_file: 0 - pgpgout: 414 - rss: 6537216 - total_mapped_file: 0 - writeback: 0 - unevictable: 0 - pgpgin: 477 - total_unevictable: 0 - pgmajfault: 0 - total_rss: 6537216 - total_rss_huge: 6291456 - total_writeback: 0 - total_inactive_anon: 0 - rss_huge: 6291456 - hierarchical_memory_limit: 67108864 - total_pgfault: 964 - total_active_file: 0 - active_anon: 6537216 - total_active_anon: 6537216 - total_pgpgout: 414 - total_cache: 0 - inactive_anon: 0 - active_file: 0 - pgfault: 964 - inactive_file: 0 - total_pgpgin: 477 - max_usage: 6651904 - usage: 6537216 - failcnt: 0 - limit: 67108864 - blkio_stats: {} - cpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24472255 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100215355 - usage_in_kernelmode: 30000000 - system_cpu_usage: 739306590000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - precpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24350896 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100093996 - usage_in_kernelmode: 30000000 - system_cpu_usage: 9492140000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "stream" - in: "query" - description: "Stream the output. If false, the stats will be output once and then it will disconnect." - type: "boolean" - default: true - tags: ["Container"] - /containers/{id}/resize: - post: - summary: "Resize a container TTY" - description: "Resize the TTY for a container. You must restart the container for the resize to take effect." 
- operationId: "ContainerResize" - consumes: - - "application/octet-stream" - produces: - - "text/plain" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "cannot resize container" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "h" - in: "query" - description: "Height of the tty session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the tty session in characters" - type: "integer" - tags: ["Container"] - /containers/{id}/start: - post: - summary: "Start a container" - operationId: "ContainerStart" - responses: - 204: - description: "no error" - 304: - description: "container already started" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." - type: "string" - tags: ["Container"] - /containers/{id}/stop: - post: - summary: "Stop a container" - operationId: "ContainerStop" - responses: - 204: - description: "no error" - 304: - description: "container already stopped" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/restart: - post: - summary: "Restart a container" - operationId: "ContainerRestart" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/kill: - post: - summary: "Kill a container" - description: "Send a POSIX signal to a container, defaulting to killing to the container." 
- operationId: "ContainerKill" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" - type: "string" - default: "SIGKILL" - tags: ["Container"] - /containers/{id}/update: - post: - summary: "Update a container" - description: "Change various configuration options of a container without having to recreate it." - operationId: "ContainerUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "The container has been updated." - schema: - type: "object" - properties: - Warnings: - type: "array" - items: - type: "string" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "update" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - example: - BlkioWeight: 300 - CpuShares: 512 - CpuPeriod: 100000 - CpuQuota: 50000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpusetCpus: "0,1" - CpusetMems: "0" - Memory: 314572800 - MemorySwap: 514288000 - MemoryReservation: 209715200 - KernelMemory: 52428800 - RestartPolicy: - MaximumRetryCount: 4 - Name: "on-failure" - tags: ["Container"] - /containers/{id}/rename: - post: - summary: "Rename a container" - operationId: "ContainerRename" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "name already in use" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "name" - in: "query" - required: true - description: "New name for the container" - type: "string" - tags: ["Container"] - /containers/{id}/pause: - post: - summary: "Pause a container" - description: | - Use the cgroups freezer to suspend all processes in a container. - - Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
- operationId: "ContainerPause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/unpause: - post: - summary: "Unpause a container" - description: "Resume a container which has been paused." - operationId: "ContainerUnpause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/attach: - post: - summary: "Attach to a container" - description: | - Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. - - Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. - - See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. - - ### Hijacking - - This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. - - This is the response from the daemon for an attach request: - - ``` - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - [STREAM] - ``` - - After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. - - To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. - - For example, the client sends this request to upgrade the connection: - - ``` - POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 - Upgrade: tcp - Connection: Upgrade - ``` - - The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Content-Type: application/vnd.docker.raw-stream - Connection: Upgrade - Upgrade: tcp - - [STREAM] - ``` - - ### Stream format - - When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. - - The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). - - It is encoded on the first eight bytes like this: - - ```go - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - ``` - - `STREAM_TYPE` can be: - - - 0: `stdin` (is written on `stdout`) - - 1: `stdout` - - 2: `stderr` - - `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. - - Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. 
- - The simplest way to implement this protocol is the following: - - 1. Read 8 bytes. - 2. Choose `stdout` or `stderr` depending on the first byte. - 3. Extract the frame size from the last four bytes. - 4. Read the extracted size and output it on the correct output. - 5. Goto 1. - - ### Stream format when using a TTY - - When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. - - operationId: "ContainerAttach" - produces: - - "application/vnd.docker.raw-stream" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." - type: "string" - - name: "logs" - in: "query" - description: | - Replay previous logs from the container. - - This is useful for attaching to a container that has started and you want to output everything since the container started. - - If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. - type: "boolean" - default: false - - name: "stream" - in: "query" - description: "Stream attached streams from the time the request was made onwards" - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/attach/ws: - get: - summary: "Attach to a container via a websocket" - operationId: "ContainerAttachWebsocket" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`." 
- type: "string" - - name: "logs" - in: "query" - description: "Return logs" - type: "boolean" - default: false - - name: "stream" - in: "query" - description: "Return stream" - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/wait: - post: - summary: "Wait for a container" - description: "Block until a container stops, then returns the exit code." - operationId: "ContainerWait" - produces: ["application/json"] - responses: - 200: - description: "The container has exit." - schema: - type: "object" - required: [StatusCode] - properties: - StatusCode: - description: "Exit code of the container" - type: "integer" - x-nullable: false - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}: - delete: - summary: "Remove a container" - operationId: "ContainerDelete" - responses: - 204: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "v" - in: "query" - description: "Remove the volumes associated with the container." - type: "boolean" - default: false - - name: "force" - in: "query" - description: "If the container is running, kill it before removing it." - type: "boolean" - default: false - - name: "link" - in: "query" - description: "Remove the specified link associated with the container." - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/archive: - head: - summary: "Get information about files in a container" - description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." - operationId: "ContainerArchiveInfo" - responses: - 200: - description: "no error" - headers: - X-Docker-Container-Path-Stat: - type: "string" - description: "TODO" - 400: - description: "Bad parameter" - schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." 
- type: "string" - x-nullable: false - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - get: - summary: "Get an archive of a filesystem resource in a container" - description: "Get a tar archive of a resource in the filesystem of container id." - operationId: "ContainerArchive" - produces: ["application/x-tar"] - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." - type: "string" - x-nullable: false - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - put: - summary: "Extract an archive of files or folders to a directory in a container" - description: "Upload a tar archive to be extracted to a path in the filesystem of container id." - operationId: "PutContainerArchive" - consumes: ["application/x-tar", "application/octet-stream"] - responses: - 200: - description: "The content was extracted successfully" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "Permission denied, the volume or container rootfs is marked as read-only." - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such container or path does not exist inside the container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Path to a directory in the container to extract the archive’s contents into. " - type: "string" - - name: "noOverwriteDirNonDir" - in: "query" - description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." - type: "string" - - name: "inputStream" - in: "body" - required: true - description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." 
- schema: - type: "string" - tags: ["Container"] - /containers/prune: - post: - summary: "Delete stopped containers" - produces: - - "application/json" - operationId: "ContainerPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - properties: - ContainersDeleted: - description: "Container IDs that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /images/json: - get: - summary: "List Images" - description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." - operationId: "ImageList" - produces: - - "application/json" - responses: - 200: - description: "Summary image data for the images matching the query" - schema: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - examples: - application/json: - - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - ParentId: "" - RepoTags: - - "ubuntu:12.04" - - "ubuntu:precise" - RepoDigests: - - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" - Created: 1474925151 - Size: 103579269 - VirtualSize: 103579269 - SharedSize: 0 - Labels: {} - Containers: 2 - - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" - ParentId: "" - RepoTags: - - "ubuntu:12.10" - - "ubuntu:quantal" - RepoDigests: - - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" - Created: 1403128455 - Size: 172064416 - VirtualSize: 172064416 - SharedSize: 0 - Labels: {} - Containers: 5 - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "all" - in: "query" - description: "Show all images. Only images from a final layer (no children) are shown by default." - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - - `before`=(`[:]`, `` or ``) - - `dangling=true` - - `label=key` or `label="key=value"` of an image label - - `reference`=(`[:]`) - - `since`=(`[:]`, `` or ``) - type: "string" - - name: "digests" - in: "query" - description: "Show digest information as a `RepoDigests` field on each image." - type: "boolean" - default: false - tags: ["Image"] - /build: - post: - summary: "Build an image" - description: | - Build an image from a tar archive with a `Dockerfile` in it. - - The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). 
- - The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. - - The build is canceled if the client drops the connection by quitting or being killed. - operationId: "ImageBuild" - consumes: - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "inputStream" - in: "body" - description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." - schema: - type: "string" - format: "binary" - - name: "dockerfile" - in: "query" - description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." - type: "string" - default: "Dockerfile" - - name: "t" - in: "query" - description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." - type: "string" - - name: "extrahosts" - in: "query" - description: "Extra hosts to add to /etc/hosts" - type: "string" - - name: "remote" - in: "query" - description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." - type: "string" - - name: "q" - in: "query" - description: "Suppress verbose build output." - type: "boolean" - default: false - - name: "nocache" - in: "query" - description: "Do not use the cache when building the image." - type: "boolean" - default: false - - name: "cachefrom" - in: "query" - description: "JSON array of images used for build cache resolution." - type: "string" - - name: "pull" - in: "query" - description: "Attempt to pull the image even if an older image exists locally." - type: "string" - - name: "rm" - in: "query" - description: "Remove intermediate containers after a successful build." - type: "boolean" - default: true - - name: "forcerm" - in: "query" - description: "Always remove intermediate containers, even upon failure." - type: "boolean" - default: false - - name: "memory" - in: "query" - description: "Set memory limit for build." - type: "integer" - - name: "memswap" - in: "query" - description: "Total memory (memory + swap). Set as `-1` to disable swap." - type: "integer" - - name: "cpushares" - in: "query" - description: "CPU shares (relative weight)." - type: "integer" - - name: "cpusetcpus" - in: "query" - description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." - type: "string" - - name: "cpuperiod" - in: "query" - description: "The length of a CPU period in microseconds." - type: "integer" - - name: "cpuquota" - in: "query" - description: "Microseconds of CPU time that the container can get in a CPU period." - type: "integer" - - name: "buildargs" - in: "query" - description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. 
This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)" - type: "integer" - - name: "shmsize" - in: "query" - description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." - type: "integer" - - name: "squash" - in: "query" - description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" - type: "boolean" - - name: "labels" - in: "query" - description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." - type: "string" - - name: "networkmode" - in: "query" - description: "Sets the networking mode for the run commands during - build. Supported standard values are: `bridge`, `host`, `none`, and - `container:`. Any other value is taken as a custom network's - name to which this container should connect to." - type: "string" - - name: "Content-type" - in: "header" - type: "string" - enum: - - "application/x-tar" - default: "application/x-tar" - - name: "X-Registry-Config" - in: "header" - description: | - This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. - - The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: - - ``` - { - "docker.example.com": { - "username": "janedoe", - "password": "hunter2" - }, - "https://index.docker.io/v1/": { - "username": "mobydock", - "password": "conta1n3rize14" - } - } - ``` - - Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. - type: "string" - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /images/create: - post: - summary: "Create an image" - description: "Create an image by either pulling it from a registry or importing it." - operationId: "ImageCreate" - consumes: - - "text/plain" - - "application/octet-stream" - produces: - - "application/json" - responses: - 200: - description: "no error" - 404: - description: "repository does not exist or no read access" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "fromImage" - in: "query" - description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." - type: "string" - - name: "fromSrc" - in: "query" - description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." - type: "string" - - name: "repo" - in: "query" - description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." - type: "string" - - name: "tag" - in: "query" - description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." 
- type: "string" - - name: "inputImage" - in: "body" - description: "Image content if the value `-` has been specified in fromSrc query parameter" - schema: - type: "string" - required: false - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" - type: "string" - tags: ["Image"] - /images/{name}/json: - get: - summary: "Inspect an image" - description: "Return low-level information about an image." - operationId: "ImageInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Image" - examples: - application/json: - Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" - Comment: "" - Os: "linux" - Architecture: "amd64" - Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - ContainerConfig: - Tty: false - Hostname: "e611e15f9c9d" - Domainname: "" - AttachStdout: false - PublishService: "" - AttachStdin: false - OpenStdin: false - StdinOnce: false - NetworkDisabled: false - OnBuild: [] - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - User: "" - WorkingDir: "" - MacAddress: "" - AttachStderr: false - Labels: - com.example.license: "GPL" - com.example.version: "1.0" - com.example.vendor: "Acme" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - - "/bin/sh" - - "-c" - - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" - DockerVersion: "1.9.0-dev" - VirtualSize: 188359297 - Size: 0 - Author: "" - Created: "2015-09-10T08:30:53.26995814Z" - GraphDriver: - Name: "aufs" - Data: {} - RepoDigests: - - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" - RepoTags: - - "example:1.0" - - "example:latest" - - "example:stable" - Config: - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - NetworkDisabled: false - OnBuild: [] - StdinOnce: false - PublishService: "" - AttachStdin: false - OpenStdin: false - Domainname: "" - AttachStdout: false - Tty: false - Hostname: "e611e15f9c9d" - Cmd: - - "/bin/bash" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Labels: - com.example.vendor: "Acme" - com.example.version: "1.0" - com.example.license: "GPL" - MacAddress: "" - AttachStderr: false - WorkingDir: "" - User: "" - RootFS: - Type: "layers" - Layers: - - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Image"] - /images/{name}/history: - get: - summary: "Get the history of an image" - description: "Return parent layers of an image." 
- operationId: "ImageHistory" - produces: ["application/json"] - responses: - 200: - description: "List of image layers" - schema: - type: "array" - items: - type: "object" - x-go-name: HistoryResponseItem - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false - examples: - application/json: - - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" - Created: 1398108230 - CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" - Tags: - - "ubuntu:lucid" - - "ubuntu:10.04" - Size: 182964289 - Comment: "" - - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" - Created: 1398108222 - CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" - Tags: [] - Size: 0 - Comment: "" - - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" - Created: 1371157430 - CreatedBy: "" - Tags: - - "scratch12:latest" - - "scratch:latest" - Size: 0 - Comment: "Imported from -" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/{name}/push: - post: - summary: "Push an image" - description: | - Push an image to a registry. - - If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. - - The push is cancelled if the HTTP connection is closed. - operationId: "ImagePush" - consumes: - - "application/octet-stream" - responses: - 200: - description: "No error" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID." - type: "string" - required: true - - name: "tag" - in: "query" - description: "The tag to associate with the image on the registry." - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" - type: "string" - required: true - tags: ["Image"] - /images/{name}/tag: - post: - summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." - operationId: "ImageTag" - responses: - 201: - description: "No error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID to tag." - type: "string" - required: true - - name: "repo" - in: "query" - description: "The repository to tag in. 
For example, `someuser/someimage`." - type: "string" - - name: "tag" - in: "query" - description: "The name of the new tag." - type: "string" - tags: ["Image"] - /images/{name}: - delete: - summary: "Remove an image" - description: | - Remove an image, along with any untagged parent images that were - referenced by that image. - - Images can't be removed if they have descendant images, are being - used by a running container or are being used by a build. - operationId: "ImageDelete" - produces: ["application/json"] - responses: - 200: - description: "The image was deleted successfully" - schema: - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - examples: - application/json: - - Untagged: "3e2f21a89f" - - Deleted: "3e2f21a89f" - - Deleted: "53b4f83ac9" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - - name: "force" - in: "query" - description: "Remove the image even if it is being used by stopped containers or has other tags" - type: "boolean" - default: false - - name: "noprune" - in: "query" - description: "Do not delete untagged parent images" - type: "boolean" - default: false - tags: ["Image"] - /images/search: - get: - summary: "Search images" - description: "Search for an image on Docker Hub." - operationId: "ImageSearch" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - type: "object" - properties: - description: - type: "string" - is_official: - type: "boolean" - is_automated: - type: "boolean" - name: - type: "string" - star_count: - type: "integer" - examples: - application/json: - - description: "" - is_official: false - is_automated: false - name: "wma55/u1210sshd" - star_count: 0 - - description: "" - is_official: false - is_automated: false - name: "jdswinbank/sshd" - star_count: 0 - - description: "" - is_official: false - is_automated: false - name: "vgauthier/sshd" - star_count: 0 - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "term" - in: "query" - description: "Term to search" - type: "string" - required: true - - name: "limit" - in: "query" - description: "Maximum number of results to return" - type: "integer" - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - - `is-automated=(true|false)` - - `is-official=(true|false)` - - `stars=` Matches images that has at least 'number' stars. - type: "string" - tags: ["Image"] - /images/prune: - post: - summary: "Delete unused images" - produces: - - "application/json" - operationId: "ImagePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - - - `dangling=` When set to `true` (or `1`), prune only - unused *and* untagged images. When set to `false` - (or `0`), all unused images are pruned. - - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
- type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - properties: - ImagesDeleted: - description: "Images that were deleted" - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /auth: - post: - summary: "Check auth configuration" - description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." - operationId: "SystemAuth" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "An identity token was generated successfully." - schema: - type: "object" - required: [Status] - properties: - Status: - description: "The status of the authentication" - type: "string" - x-nullable: false - IdentityToken: - description: "An opaque token used to authenticate a user after a successful login" - type: "string" - x-nullable: false - examples: - application/json: - Status: "Login Succeeded" - IdentityToken: "9cbaf023786cd7..." - 204: - description: "No error" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "authConfig" - in: "body" - description: "Authentication to check" - schema: - $ref: "#/definitions/AuthConfig" - tags: ["System"] - /info: - get: - summary: "Get system information" - operationId: "SystemInfo" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "object" - properties: - Architecture: - type: "string" - Containers: - type: "integer" - ContainersRunning: - type: "integer" - ContainersStopped: - type: "integer" - ContainersPaused: - type: "integer" - CpuCfsPeriod: - type: "boolean" - CpuCfsQuota: - type: "boolean" - Debug: - type: "boolean" - DiscoveryBackend: - type: "string" - DockerRootDir: - type: "string" - Driver: - type: "string" - DriverStatus: - type: "array" - items: - type: "array" - items: - type: "string" - SystemStatus: - type: "array" - items: - type: "array" - items: - type: "string" - Plugins: - type: "object" - properties: - Volume: - type: "array" - items: - type: "string" - Network: - type: "array" - items: - type: "string" - ExperimentalBuild: - type: "boolean" - HttpProxy: - type: "string" - HttpsProxy: - type: "string" - ID: - type: "string" - IPv4Forwarding: - type: "boolean" - Images: - type: "integer" - IndexServerAddress: - type: "string" - InitPath: - type: "string" - InitSha1: - type: "string" - KernelVersion: - type: "string" - Labels: - type: "array" - items: - type: "string" - MemTotal: - type: "integer" - MemoryLimit: - type: "boolean" - NCPU: - type: "integer" - NEventsListener: - type: "integer" - NFd: - type: "integer" - NGoroutines: - type: "integer" - Name: - type: "string" - NoProxy: - type: "string" - OomKillDisable: - type: "boolean" - OSType: - type: "string" - OomScoreAdj: - type: "integer" - OperatingSystem: - type: "string" - RegistryConfig: - type: "object" - properties: - IndexConfigs: - type: "object" - additionalProperties: - type: "object" - properties: - Mirrors: - type: "array" - items: - type: "string" - Name: - type: "string" - Official: - type: "boolean" - Secure: - type: "boolean" - InsecureRegistryCIDRs: - type: "array" - items: - type: "string" - SwapLimit: - type: "boolean" - SystemTime: - type: "string" - ServerVersion: - type: "string" - examples: 
- application/json: - Architecture: "x86_64" - ClusterStore: "etcd://localhost:2379" - CgroupDriver: "cgroupfs" - Containers: 11 - ContainersRunning: 7 - ContainersStopped: 3 - ContainersPaused: 1 - CpuCfsPeriod: true - CpuCfsQuota: true - Debug: false - DockerRootDir: "/var/lib/docker" - Driver: "btrfs" - DriverStatus: - - - - "" - ExperimentalBuild: false - HttpProxy: "http://test:test@localhost:8080" - HttpsProxy: "https://test:test@localhost:8080" - ID: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" - IPv4Forwarding: true - Images: 16 - IndexServerAddress: "https://index.docker.io/v1/" - InitPath: "/usr/bin/docker" - InitSha1: "" - KernelMemory: true - KernelVersion: "3.12.0-1-amd64" - Labels: - - "storage=ssd" - MemTotal: 2099236864 - MemoryLimit: true - NCPU: 1 - NEventsListener: 0 - NFd: 11 - NGoroutines: 21 - Name: "prod-server-42" - NoProxy: "9.81.1.160" - OomKillDisable: true - OSType: "linux" - OperatingSystem: "Boot2Docker" - Plugins: - Volume: - - "local" - Network: - - "null" - - "host" - - "bridge" - RegistryConfig: - IndexConfigs: - docker.io: - Name: "docker.io" - Official: true - Secure: true - InsecureRegistryCIDRs: - - "127.0.0.0/8" - SecurityOptions: - - Key: "Name" - Value: "seccomp" - - Key: "Profile" - Value: "default" - - Key: "Name" - Value: "apparmor" - - Key: "Name" - Value: "selinux" - - Key: "Name" - Value: "userns" - ServerVersion: "1.9.0" - SwapLimit: false - SystemStatus: - - - - "State" - - "Healthy" - SystemTime: "2015-03-10T11:11:23.730591467-07:00" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /version: - get: - summary: "Get version" - description: "Returns the version of Docker that is running and various information about the system that Docker is running on." - operationId: "SystemVersion" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - type: "object" - properties: - Version: - type: "string" - ApiVersion: - type: "string" - MinAPIVersion: - type: "string" - GitCommit: - type: "string" - GoVersion: - type: "string" - Os: - type: "string" - Arch: - type: "string" - KernelVersion: - type: "string" - Experimental: - type: "boolean" - BuildTime: - type: "string" - examples: - application/json: - Version: "17.04.0" - Os: "linux" - KernelVersion: "3.19.0-23-generic" - GoVersion: "go1.7.5" - GitCommit: "deadbee" - Arch: "amd64" - ApiVersion: "1.27" - MinAPIVersion: "1.12" - BuildTime: "2016-06-14T07:09:13.444803460+00:00" - Experimental: true - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /_ping: - get: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." 
- operationId: "SystemPing" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "OK" - headers: - API-Version: - type: "string" - description: "Max API Version the server supports" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /commit: - post: - summary: "Create a new image from a container" - operationId: "ImageCommit" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "containerConfig" - in: "body" - description: "The container configuration" - schema: - $ref: "#/definitions/Config" - - name: "container" - in: "query" - description: "The ID or name of the container to commit" - type: "string" - - name: "repo" - in: "query" - description: "Repository name for the created image" - type: "string" - - name: "tag" - in: "query" - description: "Tag name for the create image" - type: "string" - - name: "comment" - in: "query" - description: "Commit message" - type: "string" - - name: "author" - in: "query" - description: "Author of the image (e.g., `John Hannibal Smith `)" - type: "string" - - name: "pause" - in: "query" - description: "Whether to pause the container before committing" - type: "boolean" - default: true - - name: "changes" - in: "query" - description: "`Dockerfile` instructions to apply while committing" - type: "string" - tags: ["Image"] - /events: - get: - summary: "Monitor events" - description: | - Stream real-time events from the server. - - Various objects within Docker report events when something happens to them. 
- - Containers report these events: `attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update` - - Images report these events: `delete, import, load, pull, push, save, tag, untag` - - Volumes report these events: `create, mount, unmount, destroy` - - Networks report these events: `create, connect, disconnect, destroy` - - The Docker daemon reports these events: `reload` - - operationId: "SystemEvents" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - properties: - Type: - description: "The type of object emitting the event" - type: "string" - Action: - description: "The type of event" - type: "string" - Actor: - type: "object" - properties: - ID: - description: "The ID of the object emitting the event" - type: "string" - Attributes: - description: "Various key/value attributes of the object, depending on its type" - type: "object" - additionalProperties: - type: "string" - time: - description: "Timestamp of event" - type: "integer" - timeNano: - description: "Timestamp of event, with nanosecond accuracy" - type: "integer" - format: "int64" - examples: - application/json: - Type: "container" - Action: "create" - Actor: - ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Attributes: - com.example.some-label: "some-label-value" - image: "alpine" - name: "my-container" - time: 1461943101 - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "since" - in: "query" - description: "Show events created since this timestamp then stream new events." - type: "string" - - name: "until" - in: "query" - description: "Show events created until this timestamp then stop streaming." - type: "string" - - name: "filters" - in: "query" - description: | - A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: - - - `container=` container name or ID - - `daemon=` daemon name or ID - - `event=` event type - - `image=` image name or ID - - `label=` image or container label - - `network=` network name or ID - - `plugin`= plugin name or ID - - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, or `daemon` - - `volume=` volume name or ID - type: "string" - tags: ["System"] - /system/df: - get: - summary: "Get data usage information" - operationId: "SystemDataUsage" - responses: - 200: - description: "no error" - schema: - type: "object" - properties: - LayersSize: - type: "integer" - format: "int64" - Images: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - Containers: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" - Volumes: - type: "array" - items: - $ref: "#/definitions/Volume" - example: - LayersSize: 1092588 - Images: - - - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - ParentId: "" - RepoTags: - - "busybox:latest" - RepoDigests: - - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" - Created: 1466724217 - Size: 1092588 - SharedSize: 0 - VirtualSize: 1092588 - Labels: {} - Containers: 1 - Containers: - - - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" - Names: - - "/top" - Image: "busybox" - ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - Command: "top" - Created: 1472592424 - Ports: [] - SizeRootFs: 1092588 - Labels: {} - State: "exited" - Status: "Exited (0) 56 minutes ago" - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - IPAMConfig: null - Links: null - Aliases: null - NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" - EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" - Gateway: "172.18.0.1" - IPAddress: "172.18.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Mounts: [] - Volumes: - - - Name: "my-volume" - Driver: "local" - Mountpoint: "" - Labels: null - Scope: "" - Options: null - UsageData: - Size: 0 - RefCount: 0 - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /images/{name}/get: - get: - summary: "Export an image" - description: | - Get a tarball containing all images and metadata for a repository. - - If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. - - ### Image tarball format - - An image tarball contains one directory per image layer (named using its long ID), each containing these files: - - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer - - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. - - If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
- - ```json - { - "hello-world": { - "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" - } - } - ``` - operationId: "ImageGet" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/get: - get: - summary: "Export several images" - description: | - Get a tarball containing all images and metadata for several image repositories. - - For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. - - For details on the format, see [the export image endpoint](#operation/ImageGet). - operationId: "ImageGetAll" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "names" - in: "query" - description: "Image names to filter by" - type: "array" - items: - type: "string" - tags: ["Image"] - /images/load: - post: - summary: "Import images" - description: | - Load a set of images and tags into a repository. - - For details on the format, see [the export image endpoint](#operation/ImageGet). - operationId: "ImageLoad" - consumes: - - "application/x-tar" - produces: - - "application/json" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "imagesTarball" - in: "body" - description: "Tar archive containing images" - schema: - type: "string" - format: "binary" - - name: "quiet" - in: "query" - description: "Suppress progress details during load." - type: "boolean" - default: false - tags: ["Image"] - /containers/{id}/exec: - post: - summary: "Create an exec instance" - description: "Run a command inside a running container." - operationId: "ContainerExec" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is paused" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execConfig" - in: "body" - description: "Exec configuration" - schema: - type: "object" - properties: - AttachStdin: - type: "boolean" - description: "Attach to `stdin` of the exec command." - AttachStdout: - type: "boolean" - description: "Attach to `stdout` of the exec command." - AttachStderr: - type: "boolean" - description: "Attach to `stderr` of the exec command." - DetachKeys: - type: "string" - description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." 
- Env: - description: "A list of environment variables in the form `[\"VAR=value\", ...]`." - type: "array" - items: - type: "string" - Cmd: - type: "array" - description: "Command to run, as a string or array of strings." - items: - type: "string" - Privileged: - type: "boolean" - description: "Runs the exec process with extended privileges." - default: false - User: - type: "string" - description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." - example: - AttachStdin: false - AttachStdout: true - AttachStderr: true - DetachKeys: "ctrl-p,ctrl-q" - Tty: false - Cmd: - - "date" - Env: - - "FOO=bar" - - "BAZ=quux" - required: true - - name: "id" - in: "path" - description: "ID or name of container" - type: "string" - required: true - tags: ["Exec"] - /exec/{id}/start: - post: - summary: "Start an exec instance" - description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." - operationId: "ExecStart" - consumes: - - "application/json" - produces: - - "application/vnd.docker.raw-stream" - responses: - 200: - description: "No error" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Container is stopped or paused" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execStartConfig" - in: "body" - schema: - type: "object" - properties: - Detach: - type: "boolean" - description: "Detach from the command." - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." - example: - Detach: false - Tty: false - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - /exec/{id}/resize: - post: - summary: "Resize an exec instance" - description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." - operationId: "ExecResize" - responses: - 201: - description: "No error" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - - name: "h" - in: "query" - description: "Height of the TTY session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the TTY session in characters" - type: "integer" - tags: ["Exec"] - /exec/{id}/json: - get: - summary: "Inspect an exec instance" - description: "Return low-level information about an exec instance." - operationId: "ExecInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "object" - properties: - ID: - type: "string" - Running: - type: "boolean" - ExitCode: - type: "integer" - ProcessConfig: - $ref: "#/definitions/ProcessConfig" - OpenStdin: - type: "boolean" - OpenStderr: - type: "boolean" - OpenStdout: - type: "boolean" - ContainerID: - type: "string" - Pid: - type: "integer" - description: "The system process ID for the exec process." 
- examples: - application/json: - CanRemove: false - ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" - DetachKeys: "" - ExitCode: 2 - ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" - OpenStderr: true - OpenStdin: true - OpenStdout: true - ProcessConfig: - arguments: - - "-c" - - "exit 2" - entrypoint: "sh" - privileged: false - tty: true - user: "1000" - Running: false - Pid: 42000 - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - - /volumes: - get: - summary: "List volumes" - operationId: "VolumeList" - produces: ["application/json"] - responses: - 200: - description: "Summary volume data that matches the query" - schema: - type: "object" - required: [Volumes, Warnings] - properties: - Volumes: - type: "array" - x-nullable: false - description: "List of volumes" - items: - $ref: "#/definitions/Volume" - Warnings: - type: "array" - x-nullable: false - description: "Warnings that occurred when fetching the list of volumes" - items: - type: "string" - - examples: - application/json: - Volumes: - - Name: "tardis" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - Options: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - Warnings: [] - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to - process on the volumes list. Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - volumes that are not in use by a container. When set to `false` - (or `0`), only volumes that are in use by one or more - containers are returned. - - `driver=` Matches volumes based on their driver. - - `label=` or `label=:` Matches volumes based on - the presence of a `label` alone or a `label` and a value. - - `name=` Matches all or part of a volume name. - type: "string" - format: "json" - tags: ["Volume"] - - /volumes/create: - post: - summary: "Create a volume" - operationId: "VolumeCreate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 201: - description: "The volume was created successfully" - schema: - $ref: "#/definitions/Volume" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "volumeConfig" - in: "body" - required: true - description: "Volume configuration" - schema: - type: "object" - properties: - Name: - description: "The new volume's name. If not specified, Docker generates a name." - type: "string" - x-nullable: false - Driver: - description: "Name of the volume driver to use." - type: "string" - default: "local" - x-nullable: false - DriverOpts: - description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." - type: "object" - additionalProperties: - type: "string" - Labels: - description: "User-defined key/value metadata." 
- type: "object" - additionalProperties: - type: "string" - example: - Name: "tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Driver: "custom" - tags: ["Volume"] - - /volumes/{name}: - get: - summary: "Inspect a volume" - operationId: "VolumeInspect" - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Volume" - 404: - description: "No such volume" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - tags: ["Volume"] - - delete: - summary: "Remove a volume" - description: "Instruct the driver to remove the volume." - operationId: "VolumeDelete" - responses: - 204: - description: "The volume was removed" - 404: - description: "No such volume or volume driver" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Volume is in use and cannot be removed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - - name: "force" - in: "query" - description: "Force the removal of the volume" - type: "boolean" - default: false - tags: ["Volume"] - /volumes/prune: - post: - summary: "Delete unused volumes" - produces: - - "application/json" - operationId: "VolumePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - properties: - VolumesDeleted: - description: "Volumes that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Volume"] - /networks: - get: - summary: "List networks" - operationId: "NetworkList" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Network" - examples: - application/json: - - Name: "bridge" - Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" - Created: "2016-10-19T06:21:00.416543526Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: - - - Subnet: "172.17.0.0/16" - Containers: - 39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867: - EndpointID: "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda" - MacAddress: "02:42:ac:11:00:02" - IPv4Address: "172.17.0.2/16" - IPv6Address: "" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - - Name: "none" - Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "null" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - 
Driver: "default" - Config: [] - Containers: {} - Options: {} - - Name: "host" - Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "host" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: - - - `driver=` Matches a network's driver. - - `id=` Matches all or part of a network ID. - - `label=` or `label==` of a network label. - - `name=` Matches all or part of a network name. - - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. - type: "string" - tags: ["Network"] - - /networks/{id}: - get: - summary: "Inspect a network" - operationId: "NetworkInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Network" - 404: - description: "Network not found" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "verbose" - in: "query" - description: "Detailed inspect output for troubleshooting" - type: "boolean" - default: false - tags: ["Network"] - - delete: - summary: "Remove a network" - operationId: "NetworkDelete" - responses: - 204: - description: "No error" - 404: - description: "no such network" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - tags: ["Network"] - - /networks/create: - post: - summary: "Create a network" - operationId: "NetworkCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "No error" - schema: - type: "object" - properties: - Id: - description: "The ID of the created network." - type: "string" - Warning: - type: "string" - example: - Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" - Warning: "" - 403: - description: "operation not supported for pre-defined networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "plugin not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "networkConfig" - in: "body" - description: "Network configuration" - required: true - schema: - type: "object" - required: ["Name"] - properties: - Name: - description: "The network's name." - type: "string" - CheckDuplicate: - description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions." 
- type: "boolean" - Driver: - description: "Name of the network driver plugin to use." - type: "string" - default: "bridge" - Internal: - description: "Restrict external access to the network." - type: "boolean" - Attachable: - description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode." - type: "boolean" - Ingress: - description: "Ingress network is the network which provides the routing-mesh in swarm mode." - type: "boolean" - IPAM: - description: "Optional custom IP scheme for the network." - $ref: "#/definitions/IPAM" - EnableIPv6: - description: "Enable IPv6 on the network." - type: "boolean" - Options: - description: "Network specific options to be used by the drivers." - type: "object" - additionalProperties: - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - Name: "isolated_nw" - CheckDuplicate: false - Driver: "bridge" - EnableIPv6: true - IPAM: - Driver: "default" - Config: - - Subnet: "172.20.0.0/16" - IPRange: "172.20.10.0/24" - Gateway: "172.20.10.11" - - Subnet: "2001:db8:abcd::/64" - Gateway: "2001:db8:abcd::1011" - Options: - foo: "bar" - Internal: true - Attachable: false - Ingress: false - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - tags: ["Network"] - - /networks/{id}/connect: - post: - summary: "Connect a container to a network" - operationId: "NetworkConnect" - consumes: - - "application/octet-stream" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - properties: - Container: - type: "string" - description: "The ID or name of the container to connect to the network." 
- EndpointConfig: - $ref: "#/definitions/EndpointSettings" - example: - Container: "3613f73ba0e4" - EndpointConfig: - IPAMConfig: - IPv4Address: "172.24.56.89" - IPv6Address: "2001:db8::5689" - tags: ["Network"] - - /networks/{id}/disconnect: - post: - summary: "Disconnect a container from a network" - operationId: "NetworkDisconnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - properties: - Container: - type: "string" - description: "The ID or name of the container to disconnect from the network." - Force: - type: "boolean" - description: "Force the container to disconnect from the network." - tags: ["Network"] - /networks/prune: - post: - summary: "Delete unused networks" - produces: - - "application/json" - operationId: "NetworkPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - properties: - NetworksDeleted: - description: "Networks that were deleted" - type: "array" - items: - type: "string" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Network"] - /plugins: - get: - summary: "List plugins" - operationId: "PluginList" - description: "Returns information about installed plugins." - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Plugin" - example: - - Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" - Name: "tiborvass/sample-volume-plugin" - Tag: "latest" - Active: true - Settings: - Env: - - "DEBUG=0" - Args: null - Devices: null - Config: - Description: "A sample volume plugin for Docker" - Documentation: "https://docs.docker.com/engine/extend/plugins/" - Interface: - Types: - - "docker.volumedriver/1.0" - Socket: "plugins.sock" - Entrypoint: - - "/usr/bin/sample-volume-plugin" - - "/data" - WorkDir: "" - User: {} - Network: - Type: "" - Linux: - Capabilities: null - AllowAllDevices: false - Devices: null - Mounts: null - PropagatedMount: "/data" - Env: - - Name: "DEBUG" - Description: "If set, prints debug messages" - Settable: null - Value: "0" - Args: - Name: "args" - Description: "command line arguments" - Settable: null - Value: [] - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. 
Available filters: - - - `capability=` - - `enable=|` - tags: ["Plugin"] - - /plugins/privileges: - get: - summary: "Get plugin privileges" - operationId: "GetPluginPrivileges" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - description: "Describes a permission the user has to accept upon installing the plugin." - type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - tags: - - "Plugin" - - /plugins/pull: - post: - summary: "Install a plugin" - operationId: "PluginPull" - description: | - Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). - produces: - - "application/json" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: | - Remote reference for plugin to install. - - The `:latest` tag is optional, and is used as the default if omitted. - required: true - type: "string" - - name: "name" - in: "query" - description: | - Local name for the pulled plugin. - - The `:latest` tag is optional, and is used as the default if omitted. - required: false - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - description: "Describes a permission accepted by the user upon installing the plugin." - type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/{name}/json: - get: - summary: "Inspect a plugin" - operationId: "PluginInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - tags: ["Plugin"] - /plugins/{name}: - delete: - summary: "Remove a plugin" - operationId: "PluginDelete" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. 
The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "force" - in: "query" - description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." - type: "boolean" - default: false - tags: ["Plugin"] - /plugins/{name}/enable: - post: - summary: "Enable a plugin" - operationId: "PluginEnable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "timeout" - in: "query" - description: "Set the HTTP client timeout (in seconds)" - type: "integer" - default: 0 - tags: ["Plugin"] - /plugins/{name}/disable: - post: - summary: "Disable a plugin" - operationId: "PluginDisable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - tags: ["Plugin"] - /plugins/{name}/upgrade: - post: - summary: "Upgrade a plugin" - operationId: "PluginUpgrade" - responses: - 204: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "remote" - in: "query" - description: | - Remote reference to upgrade to. - - The `:latest` tag is optional, and is used as the default if omitted. - required: true - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - description: "Describes a permission accepted by the user upon installing the plugin." - type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/create: - post: - summary: "Create a plugin" - operationId: "PluginCreate" - consumes: - - "application/x-tar" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "query" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
- required: true - type: "string" - - name: "tarContext" - in: "body" - description: "Path to tar containing plugin rootfs and manifest" - schema: - type: "string" - format: "binary" - tags: ["Plugin"] - /plugins/{name}/push: - post: - summary: "Push a plugin" - operationId: "PluginPush" - description: | - Push a plugin to the registry. - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - responses: - 200: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /plugins/{name}/set: - post: - summary: "Configure a plugin" - operationId: "PluginSet" - consumes: - - "application/json" - parameters: - - name: "name" - in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." - required: true - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - type: "string" - example: ["DEBUG=1"] - responses: - 204: - description: "No error" - 404: - description: "Plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /nodes: - get: - summary: "List nodes" - operationId: "NodeList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Node" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
- - Available filters: - - `id=` - - `label=` - - `membership=`(`accepted`|`pending`)` - - `name=` - - `role=`(`manager`|`worker`)` - type: "string" - tags: ["Node"] - /nodes/{id}: - get: - summary: "Inspect a node" - operationId: "NodeInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Node" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - tags: ["Node"] - delete: - summary: "Delete a node" - operationId: "NodeDelete" - responses: - 200: - description: "no error" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - - name: "force" - in: "query" - description: "Force remove a node from the swarm" - default: false - type: "boolean" - tags: ["Node"] - /nodes/{id}/update: - post: - summary: "Update a node" - operationId: "NodeUpdate" - responses: - 200: - description: "no error" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID of the node" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/NodeSpec" - - name: "version" - in: "query" - description: "The version number of the node object being updated. This is required to avoid conflicting writes." - type: "integer" - format: "int64" - required: true - tags: ["Node"] - /swarm: - get: - summary: "Inspect swarm" - operationId: "SwarmInspect" - responses: - 200: - description: "no error" - schema: - allOf: - - $ref: "#/definitions/ClusterInfo" - - type: "object" - properties: - JoinTokens: - description: "The tokens workers and managers need to join the swarm." - type: "object" - properties: - Worker: - description: "The token workers can use to join the swarm." - type: "string" - Manager: - description: "The token managers can use to join the swarm." 
- type: "string" - example: - CreatedAt: "2016-08-15T16:00:20.349727406Z" - Spec: - Dispatcher: - HeartbeatPeriod: 5000000000 - Orchestration: - TaskHistoryRetentionLimit: 10 - CAConfig: - NodeCertExpiry: 7776000000000000 - Raft: - LogEntriesForSlowFollowers: 500 - HeartbeatTick: 1 - SnapshotInterval: 10000 - ElectionTick: 3 - TaskDefaults: {} - EncryptionConfig: - AutoLockManagers: false - Name: "default" - JoinTokens: - Worker: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a" - Manager: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" - ID: "70ilmkj2f6sp2137c753w2nmt" - UpdatedAt: "2016-08-15T16:32:09.623207604Z" - Version: - Index: 51 - 404: - description: "no such swarm" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/init: - post: - summary: "Initialize a new swarm" - operationId: "SwarmInit" - produces: - - "application/json" - - "text/plain" - responses: - 200: - description: "no error" - schema: - description: "The node ID" - type: "string" - example: "7v2t30z9blmxuhnyo6s4cpenp" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - properties: - ListenAddr: - description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." - type: "string" - AdvertiseAddr: - description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." - type: "string" - ForceNewCluster: - description: "Force creation of a new swarm." 
- type: "boolean" - Spec: - $ref: "#/definitions/SwarmSpec" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - ForceNewCluster: false - Spec: - Orchestration: {} - Raft: {} - Dispatcher: {} - CAConfig: {} - EncryptionConfig: - AutoLockManagers: false - tags: ["Swarm"] - /swarm/join: - post: - summary: "Join an existing swarm" - operationId: "SwarmJoin" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - properties: - ListenAddr: - description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." - type: "string" - AdvertiseAddr: - description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." - type: "string" - RemoteAddrs: - description: "Addresses of manager nodes already participating in the swarm." - type: "string" - JoinToken: - description: "Secret token for joining this swarm." - type: "string" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - RemoteAddrs: - - "node1:2377" - JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - tags: ["Swarm"] - /swarm/leave: - post: - summary: "Leave a swarm" - operationId: "SwarmLeave" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "force" - description: "Force leave swarm, even if this is the last manager or that it will break the cluster." - in: "query" - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/update: - post: - summary: "Update a swarm" - operationId: "SwarmUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - $ref: "#/definitions/SwarmSpec" - - name: "version" - in: "query" - description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." - type: "integer" - format: "int64" - required: true - - name: "rotateWorkerToken" - in: "query" - description: "Rotate the worker join token." - type: "boolean" - default: false - - name: "rotateManagerToken" - in: "query" - description: "Rotate the manager join token." - type: "boolean" - default: false - - name: "rotateManagerUnlockKey" - in: "query" - description: "Rotate the manager unlock key." 
- type: "boolean" - default: false - tags: ["Swarm"] - /swarm/unlockkey: - get: - summary: "Get the unlock key" - operationId: "SwarmUnlockkey" - consumes: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/unlock: - post: - summary: "Unlock a locked manager" - operationId: "SwarmUnlock" - consumes: - - "application/json" - produces: - - "application/json" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /services: - get: - summary: "List services" - operationId: "ServiceList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Service" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: - - - `id=` - - `label=` - - `mode=["replicated"|"global"]` - - `name=` - tags: ["Service"] - /services/create: - post: - summary: "Create a service" - operationId: "ServiceCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - type: "object" - properties: - ID: - description: "The ID of the created service." 
- type: "string" - Warning: - description: "Optional warning message" - type: "string" - example: - ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "network is not eligible for services" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "name conflicts with an existing service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "web" - TaskTemplate: - ContainerSpec: - Image: "nginx:alpine" - Mounts: - - - ReadOnly: true - Source: "web-data" - Target: "/usr/share/nginx/html" - Type: "volume" - VolumeOptions: - DriverConfig: {} - Labels: - com.example.something: "something-value" - Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] - User: "33" - DNSConfig: - Nameservers: ["8.8.8.8"] - Search: ["example.org"] - Options: ["timeout:3"] - LogDriver: - Name: "json-file" - Options: - max-file: "3" - max-size: "10M" - Placement: {} - Resources: - Limits: - MemoryBytes: 104857600 - Reservations: {} - RestartPolicy: - Condition: "on-failure" - Delay: 10000000000 - MaxAttempts: 10 - Mode: - Replicated: - Replicas: 4 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Ports: - - - Protocol: "tcp" - PublishedPort: 8080 - TargetPort: 80 - Labels: - foo: "bar" - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" - type: "string" - tags: ["Service"] - /services/{id}: - get: - summary: "Inspect a service" - operationId: "ServiceInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Service" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "insertDefaults" - in: "query" - description: "Fill empty fields with default values." - type: "boolean" - default: false - tags: ["Service"] - delete: - summary: "Delete a service" - operationId: "ServiceDelete" - responses: - 200: - description: "no error" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." 
- required: true - type: "string" - tags: ["Service"] - /services/{id}/update: - post: - summary: "Update a service" - operationId: "ServiceUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ServiceUpdateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "top" - TaskTemplate: - ContainerSpec: - Image: "busybox" - Args: - - "top" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - - - name: "version" - in: "query" - description: "The version number of the service object being updated. This is required to avoid conflicting writes." - required: true - type: "integer" - - name: "registryAuthFrom" - in: "query" - type: "string" - description: "If the X-Registry-Auth header is not specified, this - parameter indicates where to find registry authorization credentials. The - valid values are `spec` and `previous-spec`." - default: "spec" - - name: "rollback" - in: "query" - type: "string" - description: "Set to this parameter to `previous` to cause a - server-side rollback to the previous service spec. The supplied spec will be - ignored in this case." - - name: "X-Registry-Auth" - in: "header" - description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" - type: "string" - - tags: ["Service"] - /services/{id}/logs: - get: - summary: "Get service logs" - description: | - Get `stdout` and `stderr` logs from a service. - - **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. - operationId: "ServiceLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/json" - responses: - 101: - description: "logs returned as a stream" - schema: - type: "string" - format: "binary" - 200: - description: "logs returned as a string in response body" - schema: - type: "string" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such service: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the service" - type: "string" - - name: "details" - in: "query" - description: "Show service context and extra details provided to logs." 
- type: "boolean" - default: false - - name: "follow" - in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." - type: "string" - default: "all" - tags: ["Service"] - /tasks: - get: - summary: "List tasks" - operationId: "TaskList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Task" - example: - - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - - ID: "1yljwbmlr8er2waf8orvqpwms" - Version: - Index: 30 - CreatedAt: "2016-06-07T21:07:30.019104782Z" - UpdatedAt: "2016-06-07T21:07:30.231958098Z" - Name: "hopeful_cori" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:30.202183143Z" - State: "shutdown" - Message: "shutdown" - ContainerStatus: - ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" - DesiredState: "shutdown" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - 
- Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.5/16" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. Available filters: - - - `desired-state=(running | shutdown | accepted)` - - `id=` - - `label=key` or `label="key=value"` - - `name=` - - `node=` - - `service=` - tags: ["Task"] - /tasks/{id}: - get: - summary: "Inspect a task" - operationId: "TaskInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Task" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID of the task" - required: true - type: "string" - tags: ["Task"] - /tasks/{id}/logs: - get: - summary: "Get task logs" - description: | - Get `stdout` and `stderr` logs from a task. - - **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. - operationId: "TaskLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/json" - responses: - 101: - description: "logs returned as a stream" - schema: - type: "string" - format: "binary" - 200: - description: "logs returned as a string in response body" - schema: - type: "string" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such task: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID of the task" - type: "string" - - name: "details" - in: "query" - description: "Show task context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: "Only return this number of log lines from the end of the logs. 
Specify as an integer or `all` to output all log lines." - type: "string" - default: "all" - /secrets: - get: - summary: "List secrets" - operationId: "SecretList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Secret" - example: - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: - - - `id=` - - `label= or label==value` - - `name=` - - `names=` - tags: ["Secret"] - /secrets/create: - post: - summary: "Create a secret" - operationId: "SecretCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - type: "object" - properties: - ID: - description: "The ID of the created secret." - type: "string" - example: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/SecretSpec" - - type: "object" - example: - Name: "app-key.crt" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - tags: ["Secret"] - /secrets/{id}: - get: - summary: "Inspect a secret" - operationId: "SecretInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Secret" - example: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - delete: - summary: "Delete a secret" - operationId: "SecretDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - /secrets/{id}/update: - post: - summary: "Update a Secret" - operationId: "SecretUpdate" - responses: - 200: - description: "no error" - 404: - description: "no such secret" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - 
description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the secret" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/SecretSpec" - description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." - - name: "version" - in: "query" - description: "The version number of the secret object being updated. This is required to avoid conflicting writes." - type: "integer" - format: "int64" - required: true - tags: ["Secret"] diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md deleted file mode 100644 index 1ef911edb..000000000 --- a/vendor/github.com/docker/docker/api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -## Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md deleted file mode 100644 index 059dfb3ce..000000000 --- a/vendor/github.com/docker/docker/client/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# Go client for the Docker Engine API - -The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. 
- -For example, to list running containers (the equivalent of `docker ps`): - -```go -package main - -import ( - "context" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" -) - -func main() { - cli, err := client.NewEnvClient() - if err != nil { - panic(err) - } - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - if err != nil { - panic(err) - } - - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) - } -} -``` - -[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784..000000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. 
-``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 477be8b21..000000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,27 +0,0 @@ -# go-connections maintainers file -# -# This file describes who runs the docker/go-connections project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "calavera", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 4f70a4e13..000000000 --- a/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,16 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. - -go-units is licensed under the Apache License, Version 2.0. -See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index 9043b3547..000000000 --- a/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get github.com/golang/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . 
| tee /dev/stderr)"
diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
index 544b5dd3e..ba02af26d 100644
--- a/vendor/github.com/docker/go-units/duration.go
+++ b/vendor/github.com/docker/go-units/duration.go
@@ -18,9 +18,9 @@ func HumanDuration(d time.Duration) string {
 		return fmt.Sprintf("%d seconds", seconds)
 	} else if minutes := int(d.Minutes()); minutes == 1 {
 		return "About a minute"
-	} else if minutes < 60 {
+	} else if minutes < 46 {
 		return fmt.Sprintf("%d minutes", minutes)
-	} else if hours := int(d.Hours()); hours == 1 {
+	} else if hours := int(d.Hours() + 0.5); hours == 1 {
 		return "About an hour"
 	} else if hours < 48 {
 		return fmt.Sprintf("%d hours", hours)
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
index b6485edff..85f6ab071 100644
--- a/vendor/github.com/docker/go-units/size.go
+++ b/vendor/github.com/docker/go-units/size.go
@@ -31,7 +31,7 @@ type unitMap map[string]int64
 var (
 	decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
 	binaryMap  = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
-	sizeRegex  = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
+	sizeRegex  = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`)
 )
 
 var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
@@ -58,7 +58,7 @@ func CustomSize(format string, size float64, base float64, _map []string) string
 // instead of 4 digit precision used in units.HumanSize.
 func HumanSizeWithPrecision(size float64, precision int) string {
 	size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs)
-	return fmt.Sprintf("%.*g %s", precision, size, unit)
+	return fmt.Sprintf("%.*g%s", precision, size, unit)
 }
 
 // HumanSize returns a human-readable approximation of a size
@@ -70,7 +70,7 @@ func HumanSize(size float64) string {
 // BytesSize returns a human-readable size in bytes, kibibytes,
 // mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB").
 func BytesSize(size float64) string {
-	return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+	return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs)
 }
 
 // FromHumanSize returns an integer from a human-readable specification of a
diff --git a/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md
deleted file mode 100644
index 05be0f8ab..000000000
--- a/vendor/github.com/docker/libtrust/CONTRIBUTING.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Contributing to libtrust
-
-Want to hack on libtrust? Awesome! Here are instructions to get you
-started.
-
-libtrust is a part of the [Docker](https://www.docker.com) project, and follows
-the same rules and principles. If you're already familiar with the way
-Docker does things, you'll feel right at home.
-
-Otherwise, go read
-[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
-
-Happy hacking!
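The go-units hunks above change user-visible formatting: `HumanDuration` now rounds to the nearest hour (46 minutes and up reads as roughly an hour), size strings drop the space before the unit, and the relaxed `sizeRegex` accepts binary suffixes written with an `i` (for example `64KiB`). A minimal sketch of the resulting behaviour, assuming the vendored copy matches the upstream `docker/go-units` v0.3.3 revision pinned in `Gopkg.lock`:

```go
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	// Durations now round to the nearest hour: 90 minutes prints "2 hours"
	// (previously "About an hour"), and 46 minutes prints "About an hour"
	// (previously "46 minutes").
	fmt.Println(units.HumanDuration(90 * time.Minute))
	fmt.Println(units.HumanDuration(46 * time.Minute))

	// Size strings no longer carry a space before the unit,
	// e.g. "83.89MB" and "80MiB" rather than "83.89 MB" and "80 MiB".
	fmt.Println(units.HumanSize(83886080))
	fmt.Println(units.BytesSize(83886080))

	// The widened sizeRegex lets the parsing helpers accept an "i" in binary
	// suffixes, so "64KiB" is now valid input.
	if n, err := units.RAMInBytes("64KiB"); err == nil {
		fmt.Println(n) // 65536
	}
}
```

Callers that parsed the old spaced output (such as `"80 MiB"`) may need adjusting after this vendor bump.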
diff --git a/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS deleted file mode 100644 index 9768175fe..000000000 --- a/vendor/github.com/docker/libtrust/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Solomon Hykes -Josh Hawn (github: jlhawn) -Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md deleted file mode 100644 index dcffb31ae..000000000 --- a/vendor/github.com/docker/libtrust/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# libtrust - -> **WARNING** this library is no longer actively developed, and will be integrated -> in the [docker/distribution][https://www.github.com/docker/distribution] -> repository in future. - -Libtrust is library for managing authentication and authorization using public key cryptography. - -Authentication is handled using the identity attached to the public key. -Libtrust provides multiple methods to prove possession of the private key associated with an identity. - - TLS x509 certificates - - Signature verification - - Key Challenge - -Authorization and access control is managed through a distributed trust graph. -Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. - -## Copyright and license - -Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. -Docs released under Creative commons. - diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml deleted file mode 100644 index ba95cdd15..000000000 --- a/vendor/github.com/dustin/go-humanize/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go -go: - - 1.3.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown deleted file mode 100644 index 91b4ae564..000000000 --- a/vendor/github.com/dustin/go-humanize/README.markdown +++ /dev/null @@ -1,124 +0,0 @@ -# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) - -Just a few functions for helping humanize times and sizes. - -`go get` it as `github.com/dustin/go-humanize`, import it as -`"github.com/dustin/go-humanize"`, use it as `humanize`. - -See [godoc](https://godoc.org/github.com/dustin/go-humanize) for -complete documentation. - -## Sizes - -This lets you take numbers like `82854982` and convert them to useful -strings like, `83 MB` or `79 MiB` (whichever you prefer). - -Example: - -```go -fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. -``` - -## Times - -This lets you take a `time.Time` and spit it out in relative terms. -For example, `12 seconds ago` or `3 days from now`. - -Example: - -```go -fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. 
-``` - -Thanks to Kyle Lemons for the time implementation from an IRC -conversation one day. It's pretty neat. - -## Ordinals - -From a [mailing list discussion][odisc] where a user wanted to be able -to label ordinals. - - 0 -> 0th - 1 -> 1st - 2 -> 2nd - 3 -> 3rd - 4 -> 4th - [...] - -Example: - -```go -fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. -``` - -## Commas - -Want to shove commas into numbers? Be my guest. - - 0 -> 0 - 100 -> 100 - 1000 -> 1,000 - 1000000000 -> 1,000,000,000 - -100000 -> -100,000 - -Example: - -```go -fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. -``` - -## Ftoa - -Nicer float64 formatter that removes trailing zeros. - -```go -fmt.Printf("%f", 2.24) // 2.240000 -fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 -fmt.Printf("%f", 2.0) // 2.000000 -fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 -``` - -## SI notation - -Format numbers with [SI notation][sinotation]. - -Example: - -```go -humanize.SI(0.00000000223, "M") // 2.23 nM -``` - -## English-specific functions - -The following functions are in the `humanize/english` subpackage. - -### Plurals - -Simple English pluralization - -```go -english.PluralWord(1, "object", "") // object -english.PluralWord(42, "object", "") // objects -english.PluralWord(2, "bus", "") // buses -english.PluralWord(99, "locus", "loci") // loci - -english.Plural(1, "object", "") // 1 object -english.Plural(42, "object", "") // 42 objects -english.Plural(2, "bus", "") // 2 buses -english.Plural(99, "locus", "loci") // 99 loci -``` - -### Word series - -Format comma-separated words lists with conjuctions: - -```go -english.WordSeries([]string{"foo"}, "and") // foo -english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar -english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz - -english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz -``` - -[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion -[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/emicklei/go-restful-openapi/.gitignore b/vendor/github.com/emicklei/go-restful-openapi/.gitignore deleted file mode 100644 index f7a9a6520..000000000 --- a/vendor/github.com/emicklei/go-restful-openapi/.gitignore +++ /dev/null @@ -1 +0,0 @@ -examples/examples diff --git a/vendor/github.com/emicklei/go-restful-openapi/.travis.yml b/vendor/github.com/emicklei/go-restful-openapi/.travis.yml deleted file mode 100644 index c74e4fa57..000000000 --- a/vendor/github.com/emicklei/go-restful-openapi/.travis.yml +++ /dev/null @@ -1,4 +0,0 @@ -language: go - -go: - - 1.x \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful-openapi/CHANGES.md b/vendor/github.com/emicklei/go-restful-openapi/CHANGES.md deleted file mode 100644 index c6e5d7b27..000000000 --- a/vendor/github.com/emicklei/go-restful-openapi/CHANGES.md +++ /dev/null @@ -1,16 +0,0 @@ -# changes to the go-restful-openapi package - -## v1.0.0 - -- Fix for #19 MapModelTypeNameFunc has incomplete behavior -- prevent array param.Type be overwritten in the else case below (#47) -- Merge paths with existing paths from other webServices (#48) - -## v0.11.0 - - - Register pointer to array/slice of primitives as such rather than as reference to the primitive type definition. (#46) - - Add support for map types using "additional properties" (#44) - -## <= v0.10.0 - -See `git log`. 
\ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful-openapi/README.md b/vendor/github.com/emicklei/go-restful-openapi/README.md deleted file mode 100644 index cc1283c44..000000000 --- a/vendor/github.com/emicklei/go-restful-openapi/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# go-restful-openapi - -[![Build Status](https://travis-ci.org/emicklei/go-restful-openapi.png)](https://travis-ci.org/emicklei/go-restful-openapi) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful-openapi?status.svg)](https://godoc.org/github.com/emicklei/go-restful-openapi) - -[openapi](https://www.openapis.org) extension to the go-restful package, targeting [version 2.0](https://github.com/OAI/OpenAPI-Specification) - -## The following Go field tags are translated to OpenAPI equivalents -- description -- minimum -- maximum -- optional ( if set to "true" then it is not listed in `required`) -- unique -- modelDescription -- type (overrides the Go type String()) -- enum -- readOnly - -See TestThatExtraTagsAreReadIntoModel for examples. - -## dependencies - -- [go-restful](https://github.com/emicklei/go-restful) -- [go-openapi](https://github.com/go-openapi/spec) - -© 2018, ernestmicklei.com. MIT License. Contributions welcome. diff --git a/vendor/github.com/emicklei/go-restful/.gitignore b/vendor/github.com/emicklei/go-restful/.gitignore deleted file mode 100644 index cece7be66..000000000 --- a/vendor/github.com/emicklei/go-restful/.gitignore +++ /dev/null @@ -1,70 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -restful.html - -*.out - -tmp.prof - -go-restful.test - -examples/restful-basic-authentication - -examples/restful-encoding-filter - -examples/restful-filters - -examples/restful-hello-world - -examples/restful-resource-functions - -examples/restful-serve-static - -examples/restful-user-service - -*.DS_Store -examples/restful-user-resource - -examples/restful-multi-containers - -examples/restful-form-handling - -examples/restful-CORS-filter - -examples/restful-options-filter - -examples/restful-curly-router - -examples/restful-cpuprofiler-service - -examples/restful-pre-post-filters - -curly.prof - -examples/restful-NCSA-logging - -examples/restful-html-template - -s.html -restful-path-tail diff --git a/vendor/github.com/emicklei/go-restful/.travis.yml b/vendor/github.com/emicklei/go-restful/.travis.yml deleted file mode 100644 index b22f8f547..000000000 --- a/vendor/github.com/emicklei/go-restful/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.x - -script: go test -v \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md deleted file mode 100644 index 5597fa1cc..000000000 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ /dev/null @@ -1,246 +0,0 @@ -Change history of go-restful -= - -v2.9.0 -- add per Route content encoding setting (overrides container setting) - -v2.8.0 -- add Request.QueryParameters() -- add json-iterator (via build tag) -- disable vgo module (until log is moved) - -v2.7.1 -- add vgo module - -v2.6.1 -- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) - -v2.6.0 -- Make JSR 311 routing and path param processing consistent -- Adding description to RouteBuilder.Reads() -- Update example for 
Swagger12 and OpenAPI - -2017-09-13 -- added route condition functions using `.If(func)` in route building. - -2017-02-16 -- solved issue #304, make operation names unique - -2017-01-30 - - [IMPORTANT] For swagger users, change your import statement to: - swagger "github.com/emicklei/go-restful-swagger12" - -- moved swagger 1.2 code to go-restful-swagger12 -- created TAG 2.0.0 - -2017-01-27 - -- remove defer request body close -- expose Dispatch for testing filters and Routefunctions -- swagger response model cannot be array -- created TAG 1.0.0 - -2016-12-22 - -- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool) - -2016-11-26 - -- Default change! now use CurlyRouter (was RouterJSR311) -- Default change! no more caching of request content -- Default change! do not recover from panics - -2016-09-22 - -- fix the DefaultRequestContentType feature - -2016-02-14 - -- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response -- add constructors for custom entity accessors for xml and json - -2015-09-27 - -- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency - -2015-09-25 - -- fixed problem with changing Header after WriteHeader (issue 235) - -2015-09-14 - -- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) -- added support for custom EntityReaderWriters. - -2015-08-06 - -- add support for reading entities from compressed request content -- use sync.Pool for compressors of http response and request body -- add Description to Parameter for documentation in Swagger UI - -2015-03-20 - -- add configurable logging - -2015-03-18 - -- if not specified, the Operation is derived from the Route function - -2015-03-17 - -- expose Parameter creation functions -- make trace logger an interface -- fix OPTIONSFilter -- customize rendering of ServiceError -- JSR311 router now handles wildcards -- add Notes to Route - -2014-11-27 - -- (api add) PrettyPrint per response. (as proposed in #167) - -2014-11-12 - -- (api add) ApiVersion(.) for documentation in Swagger UI - -2014-11-10 - -- (api change) struct fields tagged with "description" show up in Swagger UI - -2014-10-31 - -- (api change) ReturnsError -> Returns -- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder -- fix swagger nested structs -- sort Swagger response messages by code - -2014-10-23 - -- (api add) ReturnsError allows you to document Http codes in swagger -- fixed problem with greedy CurlyRouter -- (api add) Access-Control-Max-Age in CORS -- add tracing functionality (injectable) for debugging purposes -- support JSON parse 64bit int -- fix empty parameters for swagger -- WebServicesUrl is now optional for swagger -- fixed duplicate AccessControlAllowOrigin in CORS -- (api change) expose ServeMux in container -- (api add) added AllowedDomains in CORS -- (api add) ParameterNamed for detailed documentation - -2014-04-16 - -- (api add) expose constructor of Request for testing. - -2014-06-27 - -- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). -- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). - -2014-07-03 - -- (api add) CORS can be configured with a list of allowed domains - -2014-03-12 - -- (api add) Route path parameters can use wildcard or regular expressions. 
(requires CurlyRouter) - -2014-02-26 - -- (api add) Request now provides information about the matched Route, see method SelectedRoutePath - -2014-02-17 - -- (api change) renamed parameter constants (go-lint checks) - -2014-01-10 - -- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier - -2014-01-07 - -- (api change) Write* methods in Response now return the error or nil. -- added example of serving HTML from a Go template. -- fixed comparing Allowed headers in CORS (is now case-insensitive) - -2013-11-13 - -- (api add) Response knows how many bytes are written to the response body. - -2013-10-29 - -- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information. - -2013-10-04 - -- (api add) Response knows what HTTP status has been written -- (api add) Request can have attributes (map of string->interface, also called request-scoped variables - -2013-09-12 - -- (api change) Router interface simplified -- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths - -2013-08-05 - - add OPTIONS support - - add CORS support - -2013-08-27 - -- fixed some reported issues (see github) -- (api change) deprecated use of WriteError; use WriteErrorString instead - -2014-04-15 - -- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString - -2013-08-08 - -- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. -- (api add) the swagger package has be extended to have a UI per container. -- if panic is detected then a small stack trace is printed (thanks to runner-mei) -- (api add) WriteErrorString to Response - -Important API changes: - -- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead. -- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. - - -2013-07-06 - -- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. - -2013-06-19 - -- (improve) DoNotRecover option, moved request body closer, improved ReadEntity - -2013-06-03 - -- (api change) removed Dispatcher interface, hide PathExpression -- changed receiver names of type functions to be more idiomatic Go - -2013-06-02 - -- (optimize) Cache the RegExp compilation of Paths. - -2013-05-22 - -- (api add) Added support for request/response filter functions - -2013-05-18 - - -- (api add) Added feature to change the default Http Request Dispatch function (travis cline) -- (api change) Moved Swagger Webservice to swagger package (see example restful-user) - -[2012-11-14 .. 2013-05-18> - -- See https://github.com/emicklei/go-restful/commits - -2012-11-14 - -- Initial commit - - diff --git a/vendor/github.com/emicklei/go-restful/Makefile b/vendor/github.com/emicklei/go-restful/Makefile deleted file mode 100644 index b40081cc0..000000000 --- a/vendor/github.com/emicklei/go-restful/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: test - -test: - go test -v . 
- -ex: - cd examples && ls *.go | xargs go build -o /tmp/ignore \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md deleted file mode 100644 index f52c25acf..000000000 --- a/vendor/github.com/emicklei/go-restful/README.md +++ /dev/null @@ -1,88 +0,0 @@ -go-restful -========== -package for building REST-style Web Services using Google Go - -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) -[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful) - -- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples) - -REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: - -- GET = Retrieve a representation of a resource -- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm. -- PUT = Create if you are sending the full content of the specified resource (URI). -- PUT = Update if you are updating the full content of the specified resource. -- DELETE = Delete if you are requesting the server to delete the resource -- PATCH = Update partial content of a resource -- OPTIONS = Get information about the communication options for the request URI - -### Example - -```Go -ws := new(restful.WebService) -ws. - Path("/users"). - Consumes(restful.MIME_XML, restful.MIME_JSON). - Produces(restful.MIME_JSON, restful.MIME_XML) - -ws.Route(ws.GET("/{user-id}").To(u.findUser). - Doc("get a user"). - Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")). - Writes(User{})) -... - -func (u UserResource) findUser(request *restful.Request, response *restful.Response) { - id := request.PathParameter("user-id") - ... -} -``` - -[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go) - -### Features - -- Routes for request → function mapping with path parameter (e.g. {id}) support -- Configurable router: - - (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. 
/meetings/{id} or /static/{subpath:*} - - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions -- Request API for reading structs from JSON/XML and accesing parameters (path,query,header) -- Response API for writing structs to JSON/XML and setting headers -- Customizable encoding using EntityReaderWriter registration -- Filters for intercepting the request → response flow on Service or Route level -- Request-scoped variables using attributes -- Containers for WebServices on different HTTP endpoints -- Content encoding (gzip,deflate) of request and response payloads -- Automatic responses on OPTIONS (using a filter) -- Automatic CORS request handling (using a filter) -- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12)) -- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) -- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) -- Configurable (trace) logging -- Customizable gzip/deflate readers and writers using CompressorProvider registration - -## How to customize -There are several hooks to customize the behavior of the go-restful package. - -- Router algorithm -- Panic recovery -- JSON decoder -- Trace logging -- Compression -- Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .` - -TODO: write examples of these. - -## Resources - -- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) -- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) -- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) -- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia) -- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora) - -Type ```git shortlog -s``` for a full list of contributors. - -© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/vendor/github.com/emicklei/go-restful/Srcfile b/vendor/github.com/emicklei/go-restful/Srcfile deleted file mode 100644 index 16fd18689..000000000 --- a/vendor/github.com/emicklei/go-restful/Srcfile +++ /dev/null @@ -1 +0,0 @@ -{"SkipDirs": ["examples"]} diff --git a/vendor/github.com/emicklei/go-restful/bench_test.sh b/vendor/github.com/emicklei/go-restful/bench_test.sh deleted file mode 100644 index 47ffbe4ac..000000000 --- a/vendor/github.com/emicklei/go-restful/bench_test.sh +++ /dev/null @@ -1,10 +0,0 @@ -#go test -run=none -file bench_test.go -test.bench . 
-cpuprofile=bench_test.out - -go test -c -./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany -./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly - -#go tool pprof go-restful.test tmp.prof -go tool pprof go-restful.test curly.prof - - diff --git a/vendor/github.com/emicklei/go-restful/coverage.sh b/vendor/github.com/emicklei/go-restful/coverage.sh deleted file mode 100644 index e27dbf1a9..000000000 --- a/vendor/github.com/emicklei/go-restful/coverage.sh +++ /dev/null @@ -1,2 +0,0 @@ -go test -coverprofile=coverage.out -go tool cover -html=coverage.out \ No newline at end of file diff --git a/vendor/github.com/flynn/go-shlex/Makefile b/vendor/github.com/flynn/go-shlex/Makefile deleted file mode 100644 index 038d9a489..000000000 --- a/vendor/github.com/flynn/go-shlex/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2011 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include $(GOROOT)/src/Make.inc - -TARG=shlex -GOFILES=\ - shlex.go\ - -include $(GOROOT)/src/Make.pkg diff --git a/vendor/github.com/flynn/go-shlex/README.md b/vendor/github.com/flynn/go-shlex/README.md deleted file mode 100644 index c86bcc066..000000000 --- a/vendor/github.com/flynn/go-shlex/README.md +++ /dev/null @@ -1,2 +0,0 @@ -go-shlex is a simple lexer for go that supports shell-style quoting, -commenting, and escaping. diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore deleted file mode 100644 index e256a31e0..000000000 --- a/vendor/github.com/ghodss/yaml/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# OSX leaves these everywhere on SMB shares -._* - -# Eclipse files -.classpath -.project -.settings/** - -# Emacs save files -*~ - -# Vim-related files -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -*.un~ -Session.vim -.netrwhist - -# Go test binaries -*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6edc0..000000000 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md deleted file mode 100644 index 0200f75b4..000000000 --- a/vendor/github.com/ghodss/yaml/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# YAML marshaling and unmarshaling support for Go - -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) - -## Introduction - -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. - -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. 
For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). - -## Compatibility - -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). - -## Caveats - -**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: - -``` -BAD: - exampleKey: !!binary gIGC - -GOOD: - exampleKey: gIGC -... and decode the base64 data in your code. -``` - -**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. - -## Installation and usage - -To install, run: - -``` -$ go get github.com/ghodss/yaml -``` - -And import using: - -``` -import "github.com/ghodss/yaml" -``` - -Usage is very similar to the JSON library: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` -} - -func main() { - // Marshal a Person struct to YAML. - p := Person{"John", 30} - y, err := yaml.Marshal(p) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - age: 30 - name: John - */ - - // Unmarshal the YAML back into a Person struct. 
- var p2 Person - err = yaml.Unmarshal(y, &p2) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(p2) - /* Output: - {John 30} - */ -} -``` - -`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -func main() { - j := []byte(`{"name": "John", "age": 30}`) - y, err := yaml.JSONToYAML(j) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - name: John - age: 30 - */ - j2, err := yaml.YAMLToJSON(y) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(j2)) - /* Output: - {"age":30,"name":"John"} - */ -} -``` diff --git a/vendor/github.com/go-ldap/ldap/.gitignore b/vendor/github.com/go-ldap/ldap/.gitignore deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/go-ldap/ldap/.travis.yml b/vendor/github.com/go-ldap/ldap/.travis.yml deleted file mode 100644 index d2160fd4a..000000000 --- a/vendor/github.com/go-ldap/ldap/.travis.yml +++ /dev/null @@ -1,32 +0,0 @@ -sudo: false -language: go -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - tip - -git: - depth: 1 - -matrix: - fast_finish: true - allow_failures: - - go: tip -go_import_path: gopkg.in/ldap.v3 -install: - - go get gopkg.in/asn1-ber.v1 - - go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover - - go get github.com/golang/lint/golint || go get golang.org/x/lint/golint || true - - go build -v ./... -script: - - make test - - make fmt - - make vet - - make lint diff --git a/vendor/github.com/go-ldap/ldap/CONTRIBUTING.md b/vendor/github.com/go-ldap/ldap/CONTRIBUTING.md deleted file mode 100644 index a7885231c..000000000 --- a/vendor/github.com/go-ldap/ldap/CONTRIBUTING.md +++ /dev/null @@ -1,12 +0,0 @@ -# Contribution Guidelines - -We welcome contribution and improvements. - -## Guiding Principles - -To begin with here is a draft from an email exchange: - - * take compatibility seriously (our semvers, compatibility with older go versions, etc) - * don't tag untested code for release - * beware of baking in implicit behavior based on other libraries/tools choices - * be as high-fidelity as possible in plumbing through LDAP data (don't mask errors or reduce power of someone using the library) diff --git a/vendor/github.com/go-ldap/ldap/Makefile b/vendor/github.com/go-ldap/ldap/Makefile deleted file mode 100644 index c49664722..000000000 --- a/vendor/github.com/go-ldap/ldap/Makefile +++ /dev/null @@ -1,82 +0,0 @@ -.PHONY: default install build test quicktest fmt vet lint - -# List of all release tags "supported" by our current Go version -# E.g. ":go1.1:go1.2:go1.3:go1.4:go1.5:go1.6:go1.7:go1.8:go1.9:go1.10:go1.11:go1.12:" -GO_RELEASE_TAGS := $(shell go list -f ':{{join (context.ReleaseTags) ":"}}:' runtime) - -# Only use the `-race` flag on newer versions of Go (version 1.3 and newer) -ifeq (,$(findstring :go1.3:,$(GO_RELEASE_TAGS))) - RACE_FLAG := -else - RACE_FLAG := -race -cpu 1,2,4 -endif - -# Run `go vet` on Go 1.12 and newer. For Go 1.5-1.11, use `go tool vet` -ifneq (,$(findstring :go1.12:,$(GO_RELEASE_TAGS))) - GO_VET := go vet \ - -atomic \ - -bool \ - -copylocks \ - -nilfunc \ - -printf \ - -rangeloops \ - -unreachable \ - -unsafeptr \ - -unusedresult \ - . 
-else ifneq (,$(findstring :go1.5:,$(GO_RELEASE_TAGS))) - GO_VET := go tool vet \ - -atomic \ - -bool \ - -copylocks \ - -nilfunc \ - -printf \ - -shadow \ - -rangeloops \ - -unreachable \ - -unsafeptr \ - -unusedresult \ - . -else - GO_VET := @echo "go vet skipped -- not supported on this version of Go" -endif - -default: fmt vet lint build quicktest - -install: - go get -t -v ./... - -build: - go build -v ./... - -test: - go test -v $(RACE_FLAG) -cover ./... - -quicktest: - go test ./... - -# Capture output and force failure when there is non-empty output -fmt: - @echo gofmt -l . - @OUTPUT=`gofmt -l . 2>&1`; \ - if [ "$$OUTPUT" ]; then \ - echo "gofmt must be run on the following files:"; \ - echo "$$OUTPUT"; \ - exit 1; \ - fi - -vet: - $(GO_VET) - -# https://github.com/golang/lint -# go get github.com/golang/lint/golint -# Capture output and force failure when there is non-empty output -# Only run on go1.5+ -lint: - @echo golint ./... - @OUTPUT=`command -v golint >/dev/null 2>&1 && golint ./... 2>&1`; \ - if [ "$$OUTPUT" ]; then \ - echo "golint errors:"; \ - echo "$$OUTPUT"; \ - exit 1; \ - fi diff --git a/vendor/github.com/go-ldap/ldap/README.md b/vendor/github.com/go-ldap/ldap/README.md deleted file mode 100644 index 25cf730b4..000000000 --- a/vendor/github.com/go-ldap/ldap/README.md +++ /dev/null @@ -1,54 +0,0 @@ -[![GoDoc](https://godoc.org/gopkg.in/ldap.v3?status.svg)](https://godoc.org/gopkg.in/ldap.v3) -[![Build Status](https://travis-ci.org/go-ldap/ldap.svg)](https://travis-ci.org/go-ldap/ldap) - -# Basic LDAP v3 functionality for the GO programming language. - -## Install - -For the latest version use: - - go get gopkg.in/ldap.v3 - -Import the latest version with: - - import "gopkg.in/ldap.v3" - -## Required Libraries: - - - gopkg.in/asn1-ber.v1 - -## Features: - - - Connecting to LDAP server (non-TLS, TLS, STARTTLS) - - Binding to LDAP server - - Searching for entries - - Filter Compile / Decompile - - Paging Search Results - - Modify Requests / Responses - - Add Requests / Responses - - Delete Requests / Responses - - Modify DN Requests / Responses - -## Examples: - - - search - - modify - -## Contributing: - -Bug reports and pull requests are welcome! - -Before submitting a pull request, please make sure tests and verification scripts pass: -``` -make all -``` - -To set up a pre-push hook to run the tests and verify scripts before pushing: -``` -ln -s ../../.githooks/pre-push .git/hooks/pre-push -``` - ---- -The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/) -The design is licensed under the Creative Commons 3.0 Attributions license. -Read this article for more details: http://blog.golang.org/gopher diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md deleted file mode 100644 index 26296d024..000000000 --- a/vendor/github.com/go-logr/logr/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# A more minimal logging API for Go - -Before you consider this package, please read [this blog post by the inimitable -Dave Cheney](http://dave.cheney.net/2015/11/05/lets-talk-about-logging). I -really appreciate what he has to say, and it largely aligns with my own -experiences. Too many choices of levels means inconsistent logs. - -This package offers a purely abstract interface, based on these ideas but with -a few twists. Code can depend on just this interface and have the actual -logging implementation be injected from callers. Ideally only `main()` knows -what logging implementation is being used. 
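To make the injection idea concrete, a minimal sketch is shown below (not part of the original README; the package and function names are hypothetical) of library code that depends only on the abstract `logr.Logger` interface and receives the concrete implementation from its caller:

```go
// Package workqueue is a hypothetical consumer of the abstract logr API;
// nothing here imports a concrete logging implementation.
package workqueue

import (
	"errors"

	"github.com/go-logr/logr"
)

// Process receives the logger from its caller (ultimately from main()),
// so the choice of logging backend stays out of library code.
func Process(log logr.Logger, item string) {
	log.Info("processing item", "item", item)

	if item == "" {
		// The error is handled (logged) here rather than returned, which is
		// the case where the README recommends an error log.
		log.Error(errors.New("empty item"), "skipping item")
		return
	}

	// Verbosity expresses relative importance numerically instead of using
	// semantic level names such as "debug" or "trace".
	log.V(1).Info("item details", "length", len(item))
}
```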
- -# Differences from Dave's ideas - -The main differences are: - -1) Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. I disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. I -restrict the API to just 2 types of logs: info and error. - -Info logs are things you want to tell the user which are not errors. Error -logs are, well, errors. If your code receives an `error` from a subordinate -function call and is logging that `error` *and not returning it*, use error -logs. - -2) Verbosity-levels on info logs. This gives developers a chance to indicate -arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug". Superficially this -may feel very similar, but the primary difference is the lack of semantics. -Because verbosity is a numerical value, it's safe to assume that an app running -with higher verbosity means more (and less important) logs will be generated. - -This is a BETA grade API. I have implemented it for -[glog](https://godoc.org/github.com/golang/glog). Until there is a significant -2nd implementation, I don't really know how it will change. diff --git a/vendor/github.com/go-logr/zapr/.gitignore b/vendor/github.com/go-logr/zapr/.gitignore deleted file mode 100644 index 5ba77727f..000000000 --- a/vendor/github.com/go-logr/zapr/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*~ -*.swp -/vendor diff --git a/vendor/github.com/go-logr/zapr/Gopkg.lock b/vendor/github.com/go-logr/zapr/Gopkg.lock deleted file mode 100644 index 8da0a8f76..000000000 --- a/vendor/github.com/go-logr/zapr/Gopkg.lock +++ /dev/null @@ -1,52 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - digest = "1:edd2fa4578eb086265db78a9201d15e76b298dfd0d5c379da83e9c61712cf6df" - name = "github.com/go-logr/logr" - packages = ["."] - pruneopts = "UT" - revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" - version = "v0.1.0" - -[[projects]] - digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" - name = "go.uber.org/atomic" - packages = ["."] - pruneopts = "UT" - revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" - version = "v1.3.2" - -[[projects]] - digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" - name = "go.uber.org/multierr" - packages = ["."] - pruneopts = "UT" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" - -[[projects]] - digest = "1:9580b1b079114140ade8cec957685344d14f00119e0241f6b369633cb346eeb3" - name = "go.uber.org/zap" - packages = [ - ".", - "buffer", - "internal/bufferpool", - "internal/color", - "internal/exit", - "zapcore", - ] - pruneopts = "UT" - revision = "eeedf312bc6c57391d84767a4cd413f02a917974" - version = "v1.8.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/go-logr/logr", - "go.uber.org/zap", - "go.uber.org/zap/zapcore", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/go-logr/zapr/Gopkg.toml b/vendor/github.com/go-logr/zapr/Gopkg.toml deleted file mode 100644 index ae475d72e..000000000 --- a/vendor/github.com/go-logr/zapr/Gopkg.toml +++ /dev/null @@ -1,38 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/go-logr/logr" - version = "0.1.0" - -[[constraint]] - name = "go.uber.org/zap" - version = "1.8.0" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/github.com/go-logr/zapr/README.md b/vendor/github.com/go-logr/zapr/README.md deleted file mode 100644 index 8472875fa..000000000 --- a/vendor/github.com/go-logr/zapr/README.md +++ /dev/null @@ -1,45 +0,0 @@ -Zapr :zap: -========== - -A [logr](https://github.com/go-logr/logr) implementation using -[Zap](go.uber.org/zap). - -Usage ------ - -```go -import ( - "fmt" - - "go.uber.org/zap" - "github.com/go-logr/logr" - "github.com/directxman12/zapr" -) - -func main() { - var log logr.Logger - - zapLog, err := zap.NewDevelopment() - if err != nil { - panic(fmt.Sprintf("who watches the watchmen (%v)?", err)) - } - log = zapr.NewLogger(zapLog) - - log.Info("Logr in action!", "the answer", 42) -} -``` - -Implementation Details ----------------------- - -For the most part, concepts in Zap correspond directly with those in logr. - -Unlike Zap, all fields *must* be in the form of suggared fields -- -it's illegal to pass a strongly-typed Zap field in a key position to any -of the logging methods (`Log`, `Error`). - -Levels in logr correspond to custom debug levels in Zap. Any given level -in logr is represents by its inverse in Zap (`zapLevel = -1*logrLevel`). - -For example `V(2)` is equivalent to log level -2 in Zap, while `V(1)` is -equivalent to Zap's `DebugLevel`. 
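As a follow-up to the level mapping described above, the sketch below (an illustration inferred from that description, not taken from the README) configures the underlying Zap logger to allow levels down to -2 so that both `V(1)` and `V(2)` messages are emitted:

```go
package main

import (
	"fmt"

	"github.com/directxman12/zapr"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Allow Zap levels down to -2 so that logr V(1) and V(2) messages
	// (Zap levels -1 and -2 under zapLevel = -1*logrLevel) are not filtered.
	cfg := zap.NewDevelopmentConfig()
	cfg.Level = zap.NewAtomicLevelAt(zapcore.Level(-2))
	zapLog, err := cfg.Build()
	if err != nil {
		panic(fmt.Sprintf("failed to build zap logger: %v", err))
	}

	log := zapr.NewLogger(zapLog)
	log.Info("always visible", "answer", 42) // logr level 0 maps to Zap InfoLevel
	log.V(1).Info("debug-ish detail")        // Zap level -1 (DebugLevel)
	log.V(2).Info("even finer detail")       // Zap level -2 (custom debug level)
}
```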
diff --git a/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/vendor/github.com/go-openapi/jsonpointer/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore deleted file mode 100644 index 769c24400..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml deleted file mode 100644 index 3436c4590..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- '1.9' -- 1.10.x -- 1.11.x -install: -- go get -u github.com/stretchr/testify/assert -- go get -u github.com/go-openapi/swag -language: go -notifications: - slack: - secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md deleted file mode 100644 index 813788aff..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) -An implementation of JSON Pointer - Go language - -## Status -Completed YES - -Tested YES - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -### Note -The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/github.com/go-openapi/jsonpointer/go.mod b/vendor/github.com/go-openapi/jsonpointer/go.mod deleted file mode 100644 index eb4d623c5..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/go-openapi/jsonpointer - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-openapi/swag v0.17.0 - github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - gopkg.in/yaml.v2 v2.2.1 // indirect -) diff --git a/vendor/github.com/go-openapi/jsonpointer/go.sum b/vendor/github.com/go-openapi/jsonpointer/go.sum deleted file mode 100644 index c71f4d7a2..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= -github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore deleted 
file mode 100644 index 769c24400..000000000 --- a/vendor/github.com/go-openapi/jsonreference/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml deleted file mode 100644 index 40034d28d..000000000 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- '1.9' -- 1.10.x -- 1.11.x -install: -- go get -u github.com/stretchr/testify/assert -- go get -u github.com/PuerkitoBio/purell -- go get -u github.com/go-openapi/jsonpointer -language: go -notifications: - slack: - secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. 
- -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md deleted file mode 100644 index 66345f4c6..000000000 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) -An implementation of JSON Reference - Go language - -## Status -Work in progress ( 90% done ) - -## Dependencies -https://github.com/go-openapi/jsonpointer - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/go.mod b/vendor/github.com/go-openapi/jsonreference/go.mod deleted file mode 100644 index 6d15a7050..000000000 --- a/vendor/github.com/go-openapi/jsonreference/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module github.com/go-openapi/jsonreference - -require ( - github.com/PuerkitoBio/purell v1.1.0 - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-openapi/jsonpointer v0.17.0 - github.com/go-openapi/swag v0.17.0 // 
indirect - github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect - golang.org/x/text v0.3.0 // indirect - gopkg.in/yaml.v2 v2.2.1 // indirect -) diff --git a/vendor/github.com/go-openapi/jsonreference/go.sum b/vendor/github.com/go-openapi/jsonreference/go.sum deleted file mode 100644 index ec9bdbc28..000000000 --- a/vendor/github.com/go-openapi/jsonreference/go.sum +++ /dev/null @@ -1,20 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= -github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/spec/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore deleted file mode 100644 index dd91ed6a0..000000000 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ 
-secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml deleted file mode 100644 index 48c9813ff..000000000 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ /dev/null @@ -1,23 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 25 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 2 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals diff --git a/vendor/github.com/go-openapi/spec/.travis.yml b/vendor/github.com/go-openapi/spec/.travis.yml deleted file mode 100644 index a4f03484b..000000000 --- a/vendor/github.com/go-openapi/spec/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- '1.9' -- 1.10.x -- 1.11.x -install: -- go get -u github.com/stretchr/testify -- go get -u github.com/go-openapi/swag -- go get -u gopkg.in/yaml.v2 -- go get -u github.com/go-openapi/jsonpointer -- go get -u github.com/go-openapi/jsonreference -language: go -notifications: - slack: - secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md deleted file mode 100644 index 6354742cb..000000000 --- a/vendor/github.com/go-openapi/spec/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/spec.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) - -The object model for OpenAPI specification documents. - -Currently supports Swagger 2.0. diff --git a/vendor/github.com/go-openapi/spec/go.mod b/vendor/github.com/go-openapi/spec/go.mod deleted file mode 100644 index 5af64c10b..000000000 --- a/vendor/github.com/go-openapi/spec/go.mod +++ /dev/null @@ -1,16 +0,0 @@ -module github.com/go-openapi/spec - -require ( - github.com/PuerkitoBio/purell v1.1.0 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-openapi/jsonpointer v0.17.0 - github.com/go-openapi/jsonreference v0.17.0 - github.com/go-openapi/swag v0.17.0 - github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect - golang.org/x/text v0.3.0 // indirect - gopkg.in/yaml.v2 v2.2.1 -) diff --git a/vendor/github.com/go-openapi/spec/go.sum b/vendor/github.com/go-openapi/spec/go.sum deleted file mode 100644 index ab6bfb608..000000000 --- a/vendor/github.com/go-openapi/spec/go.sum +++ /dev/null @@ -1,22 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonreference v0.17.0 h1:d/o7/fsLWWQZACbihvZxcyLQ59jfUVs7WOJv/ak7T7A= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/swag 
v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= -github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/swag/.editorconfig b/vendor/github.com/go-openapi/swag/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/swag/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore deleted file mode 100644 index 5862205ee..000000000 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -secrets.yml -vendor -Godeps diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml deleted file mode 100644 index 625c3d6af..000000000 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ /dev/null @@ -1,22 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 25 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 3 - min-occurrences: 2 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoinits - - gochecknoglobals diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml deleted file mode 100644 index bd3a2e527..000000000 --- a/vendor/github.com/go-openapi/swag/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- '1.9' -- 1.10.x -- 1.11.x -install: -- go get -u github.com/stretchr/testify -- go get -u github.com/mailru/easyjson -- go get -u gopkg.in/yaml.v2 -language: go -notifications: - slack: - secure: 
QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. 
All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md deleted file mode 100644 index 459a3e18d..000000000 --- a/vendor/github.com/go-openapi/swag/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/swag.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) - -Contains a bunch of helper functions for go-openapi and go-swagger projects. - -You may also use it standalone for your projects. 
- -* convert between value and pointers for builtin types -* convert from string to builtin types (wraps strconv) -* fast json concatenation -* search in path -* load from file or http -* name mangling - - -This repo has only few dependencies outside of the standard library: - -* JSON utilities depend on github.com/mailru/easyjson -* YAML utilities depend on gopkg.in/yaml.v2 diff --git a/vendor/github.com/go-openapi/swag/go.mod b/vendor/github.com/go-openapi/swag/go.mod deleted file mode 100644 index 9eb936a19..000000000 --- a/vendor/github.com/go-openapi/swag/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/go-openapi/swag - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - gopkg.in/yaml.v2 v2.2.1 -) diff --git a/vendor/github.com/go-openapi/swag/go.sum b/vendor/github.com/go-openapi/swag/go.sum deleted file mode 100644 index d6e717bd4..000000000 --- a/vendor/github.com/go-openapi/swag/go.sum +++ /dev/null @@ -1,9 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-redis/redis/.gitignore b/vendor/github.com/go-redis/redis/.gitignore deleted file mode 100644 index ebfe903bc..000000000 --- a/vendor/github.com/go-redis/redis/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.rdb -testdata/*/ diff --git a/vendor/github.com/go-redis/redis/.travis.yml b/vendor/github.com/go-redis/redis/.travis.yml deleted file mode 100644 index 6b110b4cb..000000000 --- a/vendor/github.com/go-redis/redis/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -sudo: false -language: go - -services: - - redis-server - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - tip - -matrix: - allow_failures: - - go: tip - -install: - - go get github.com/onsi/ginkgo - - go get github.com/onsi/gomega diff --git a/vendor/github.com/go-redis/redis/CHANGELOG.md b/vendor/github.com/go-redis/redis/CHANGELOG.md deleted file mode 100644 index 19645661a..000000000 --- a/vendor/github.com/go-redis/redis/CHANGELOG.md +++ /dev/null @@ -1,25 +0,0 @@ -# Changelog - -## Unreleased - -- Cluster and Ring pipelines process commands for each node in its own goroutine. - -## 6.14 - -- Added Options.MinIdleConns. -- Added Options.MaxConnAge. -- PoolStats.FreeConns is renamed to PoolStats.IdleConns. -- Add Client.Do to simplify creating custom commands. -- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. -- Lower memory usage. - -## v6.13 - -- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better keys distribution between shards. 
-- Cluster client was optimized to use much less memory when reloading cluster state. -- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout occurres. In most cases it is recommended to use PubSub.Channel instead. -- Dialer.KeepAlive is set to 5 minutes by default. - -## v6.12 - -- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis Servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup diff --git a/vendor/github.com/go-redis/redis/Makefile b/vendor/github.com/go-redis/redis/Makefile deleted file mode 100644 index fa3b4e004..000000000 --- a/vendor/github.com/go-redis/redis/Makefile +++ /dev/null @@ -1,22 +0,0 @@ -all: testdeps - go test ./... - go test ./... -short -race - env GOOS=linux GOARCH=386 go test ./... - go vet - go get github.com/gordonklaus/ineffassign - ineffassign . - -testdeps: testdata/redis/src/redis-server - -bench: testdeps - go test ./... -test.run=NONE -test.bench=. -test.benchmem - -.PHONY: all test testdeps bench - -testdata/redis: - mkdir -p $@ - wget -qO- https://github.com/antirez/redis/archive/5.0.tar.gz | tar xvz --strip-components=1 -C $@ - -testdata/redis/src/redis-server: testdata/redis - sed -i.bak 's/libjemalloc.a/libjemalloc.a -lrt/g' $ -} - -func ExampleClient() { - err := client.Set("key", "value", 0).Err() - if err != nil { - panic(err) - } - - val, err := client.Get("key").Result() - if err != nil { - panic(err) - } - fmt.Println("key", val) - - val2, err := client.Get("key2").Result() - if err == redis.Nil { - fmt.Println("key2 does not exist") - } else if err != nil { - panic(err) - } else { - fmt.Println("key2", val2) - } - // Output: key value - // key2 does not exist -} -``` - -## Howto - -Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package. 
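
Editor's note: the quickstart portion of this removed README is truncated above (the construction of the package-level `client` used by `ExampleClient` is missing from the hunk). As a hedged sketch only — not text from the original file — a v6-era go-redis client is typically constructed along these lines, assuming a local Redis on the default port:

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// Construct a client; fields beyond Addr are optional and shown
	// here only to mirror the defaults.
	client := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "", // no password set
		DB:       0,  // use default DB
	})

	// Ping verifies connectivity before issuing further commands,
	// matching the pattern used by the ExampleClient shown above.
	pong, err := client.Ping().Result()
	fmt.Println(pong, err)
}
```
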
- -## Look and feel - -Some corner cases: - -```go -// SET key value EX 10 NX -set, err := client.SetNX("key", "value", 10*time.Second).Result() - -// SORT list LIMIT 0 2 ASC -vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() - -// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 -vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{ - Min: "-inf", - Max: "+inf", - Offset: 0, - Count: 2, -}).Result() - -// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM -vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result() - -// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" -vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() -``` - -## Benchmark - -go-redis vs redigo: - -``` -BenchmarkSetGoRedis10Conns64Bytes-4 200000 7621 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis100Conns64Bytes-4 200000 7554 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis10Conns1KB-4 200000 7697 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis100Conns1KB-4 200000 7688 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis10Conns10KB-4 200000 9214 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis100Conns10KB-4 200000 9181 ns/op 210 B/op 6 allocs/op -BenchmarkSetGoRedis10Conns1MB-4 2000 583242 ns/op 2337 B/op 6 allocs/op -BenchmarkSetGoRedis100Conns1MB-4 2000 583089 ns/op 2338 B/op 6 allocs/op -BenchmarkSetRedigo10Conns64Bytes-4 200000 7576 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo100Conns64Bytes-4 200000 7782 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo10Conns1KB-4 200000 7958 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo100Conns1KB-4 200000 7725 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo10Conns10KB-4 100000 18442 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo100Conns10KB-4 100000 18818 ns/op 208 B/op 7 allocs/op -BenchmarkSetRedigo10Conns1MB-4 2000 668829 ns/op 226 B/op 7 allocs/op -BenchmarkSetRedigo100Conns1MB-4 2000 679542 ns/op 226 B/op 7 allocs/op -``` - -Redis Cluster: - -``` -BenchmarkRedisPing-4 200000 6983 ns/op 116 B/op 4 allocs/op -BenchmarkRedisClusterPing-4 100000 11535 ns/op 117 B/op 4 allocs/op -``` - -## See also - -- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) -- [Golang msgpack](https://github.com/vmihailenco/msgpack) -- [Golang message task queue](https://github.com/go-msgqueue/msgqueue) diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore deleted file mode 100644 index 2de28da16..000000000 --- a/vendor/github.com/go-sql-driver/mysql/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -.DS_Store -.DS_Store? -._* -.Spotlight-V100 -.Trashes -Icon? 
-ehthumbs.db -Thumbs.db -.idea diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml deleted file mode 100644 index cc1268c36..000000000 --- a/vendor/github.com/go-sql-driver/mysql/.travis.yml +++ /dev/null @@ -1,107 +0,0 @@ -sudo: false -language: go -go: - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - master - -before_install: - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - -before_script: - - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf - - sudo service mysql restart - - .travis/wait_mysql.sh - - mysql -e 'create database gotest;' - -matrix: - include: - - env: DB=MYSQL8 - sudo: required - dist: trusty - go: 1.10.x - services: - - docker - before_install: - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - - docker pull mysql:8.0 - - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret - mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 - - cp .travis/docker.cnf ~/.my.cnf - - .travis/wait_mysql.sh - before_script: - - export MYSQL_TEST_USER=gotest - - export MYSQL_TEST_PASS=secret - - export MYSQL_TEST_ADDR=127.0.0.1:3307 - - export MYSQL_TEST_CONCURRENT=1 - - - env: DB=MYSQL57 - sudo: required - dist: trusty - go: 1.10.x - services: - - docker - before_install: - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - - docker pull mysql:5.7 - - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret - mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 - - cp .travis/docker.cnf ~/.my.cnf - - .travis/wait_mysql.sh - before_script: - - export MYSQL_TEST_USER=gotest - - export MYSQL_TEST_PASS=secret - - export MYSQL_TEST_ADDR=127.0.0.1:3307 - - export MYSQL_TEST_CONCURRENT=1 - - - env: DB=MARIA55 - sudo: required - dist: trusty - go: 1.10.x - services: - - docker - before_install: - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - - docker pull mariadb:5.5 - - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret - mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 - - cp .travis/docker.cnf ~/.my.cnf - - .travis/wait_mysql.sh - before_script: - - export MYSQL_TEST_USER=gotest - - export MYSQL_TEST_PASS=secret - - export MYSQL_TEST_ADDR=127.0.0.1:3307 - - export MYSQL_TEST_CONCURRENT=1 - - - env: DB=MARIA10_1 - sudo: required - dist: trusty - go: 1.10.x - services: - - docker - before_install: - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - - docker pull mariadb:10.1 - - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret - mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 - - cp .travis/docker.cnf ~/.my.cnf - - .travis/wait_mysql.sh - before_script: - - export MYSQL_TEST_USER=gotest - - export MYSQL_TEST_PASS=secret - - export 
MYSQL_TEST_ADDR=127.0.0.1:3307 - - export MYSQL_TEST_CONCURRENT=1 - -script: - - go test -v -covermode=count -coverprofile=coverage.out - - go vet ./... - - .travis/gofmt.sh -after_script: - - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md deleted file mode 100644 index ce1b5330a..000000000 --- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md +++ /dev/null @@ -1,178 +0,0 @@ -## Version 1.4.1 (2018-11-14) - -Bugfixes: - - - Fix TIME format for binary columns (#818) - - Fix handling of empty auth plugin names (#835) - - Fix caching_sha2_password with empty password (#826) - - Fix canceled context broke mysqlConn (#862) - - Fix OldAuthSwitchRequest support (#870) - - Fix Auth Response packet for cleartext password (#887) - -## Version 1.4 (2018-06-03) - -Changes: - - - Documentation fixes (#530, #535, #567) - - Refactoring (#575, #579, #580, #581, #603, #615, #704) - - Cache column names (#444) - - Sort the DSN parameters in DSNs generated from a config (#637) - - Allow native password authentication by default (#644) - - Use the default port if it is missing in the DSN (#668) - - Removed the `strict` mode (#676) - - Do not query `max_allowed_packet` by default (#680) - - Dropped support Go 1.6 and lower (#696) - - Updated `ConvertValue()` to match the database/sql/driver implementation (#760) - - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783) - - Improved the compatibility of the authentication system (#807) - -New Features: - - - Multi-Results support (#537) - - `rejectReadOnly` DSN option (#604) - - `context.Context` support (#608, #612, #627, #761) - - Transaction isolation level support (#619, #744) - - Read-Only transactions support (#618, #634) - - `NewConfig` function which initializes a config with default values (#679) - - Implemented the `ColumnType` interfaces (#667, #724) - - Support for custom string types in `ConvertValue` (#623) - - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710) - - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802) - - Implemented `driver.SessionResetter` (#779) - - `sha256_password` authentication plugin support (#808) - -Bugfixes: - - - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718) - - Fixed LOAD LOCAL DATA INFILE for empty files (#590) - - Removed columns definition cache since it sometimes cached invalid data (#592) - - Don't mutate registered TLS configs (#600) - - Make RegisterTLSConfig concurrency-safe (#613) - - Handle missing auth data in the handshake packet correctly (#646) - - Do not retry queries when data was written to avoid data corruption (#302, #736) - - Cache the connection pointer for error handling before invalidating it (#678) - - Fixed imports for appengine/cloudsql (#700) - - Fix sending STMT_LONG_DATA for 0 byte data (#734) - - Set correct capacity for []bytes read from length-encoded strings (#766) - - Make RegisterDial concurrency-safe (#773) - - -## Version 1.3 (2016-12-01) - -Changes: - - - Go 1.1 is no longer supported - - Use decimals fields in MySQL to format time types (#249) - - Buffer optimizations (#269) - - TLS ServerName defaults to the host (#283) - - Refactoring (#400, #410, #437) - - Adjusted documentation for second generation CloudSQL (#485) - - Documented DSN system var quoting rules (#502) - - Made statement.Close() calls idempotent 
to avoid errors in Go 1.6+ (#512) - -New Features: - - - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) - - Support for returning table alias on Columns() (#289, #359, #382) - - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490) - - Support for uint64 parameters with high bit set (#332, #345) - - Cleartext authentication plugin support (#327) - - Exported ParseDSN function and the Config struct (#403, #419, #429) - - Read / Write timeouts (#401) - - Support for JSON field type (#414) - - Support for multi-statements and multi-results (#411, #431) - - DSN parameter to set the driver-side max_allowed_packet value manually (#489) - - Native password authentication plugin support (#494, #524) - -Bugfixes: - - - Fixed handling of queries without columns and rows (#255) - - Fixed a panic when SetKeepAlive() failed (#298) - - Handle ERR packets while reading rows (#321) - - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349) - - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356) - - Actually zero out bytes in handshake response (#378) - - Fixed race condition in registering LOAD DATA INFILE handler (#383) - - Fixed tests with MySQL 5.7.9+ (#380) - - QueryUnescape TLS config names (#397) - - Fixed "broken pipe" error by writing to closed socket (#390) - - Fixed LOAD LOCAL DATA INFILE buffering (#424) - - Fixed parsing of floats into float64 when placeholders are used (#434) - - Fixed DSN tests with Go 1.7+ (#459) - - Handle ERR packets while waiting for EOF (#473) - - Invalidate connection on error while discarding additional results (#513) - - Allow terminating packets of length 0 (#516) - - -## Version 1.2 (2014-06-03) - -Changes: - - - We switched back to a "rolling release". `go get` installs the current master branch again - - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver - - Exported errors to allow easy checking from application code - - Enabled TCP Keepalives on TCP connections - - Optimized INFILE handling (better buffer size calculation, lazy init, ...) - - The DSN parser also checks for a missing separating slash - - Faster binary date / datetime to string formatting - - Also exported the MySQLWarning type - - mysqlConn.Close returns the first error encountered instead of ignoring all errors - - writePacket() automatically writes the packet size to the header - - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets - -New Features: - - - `RegisterDial` allows the usage of a custom dial function to establish the network connection - - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter - - Logging of critical errors is configurable with `SetLogger` - - Google CloudSQL support - -Bugfixes: - - - Allow more than 32 parameters in prepared statements - - Various old_password fixes - - Fixed TestConcurrent test to pass Go's race detection - - Fixed appendLengthEncodedInteger for large numbers - - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) - - -## Version 1.1 (2013-11-02) - -Changes: - - - Go-MySQL-Driver now requires Go 1.1 - - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore - - Made closing rows and connections error tolerant. 
This allows for example deferring rows.Close() without checking for errors - - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` - - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. - - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries - - Optimized the buffer for reading - - stmt.Query now caches column metadata - - New Logo - - Changed the copyright header to include all contributors - - Improved the LOAD INFILE documentation - - The driver struct is now exported to make the driver directly accessible - - Refactored the driver tests - - Added more benchmarks and moved all to a separate file - - Other small refactoring - -New Features: - - - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure - - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs - - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used - -Bugfixes: - - - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification - - Convert to DB timezone when inserting `time.Time` - - Splitted packets (more than 16MB) are now merged correctly - - Fixed false positive `io.EOF` errors when the data was fully read - - Avoid panics on reuse of closed connections - - Fixed empty string producing false nil values - - Fixed sign byte for positive TIME fields - - -## Version 1.0 (2013-05-14) - -Initial Release diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md deleted file mode 100644 index 8fe16bcb4..000000000 --- a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ -# Contributing Guidelines - -## Reporting Issues - -Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). - -## Contributing Code - -By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. -Don't forget to add yourself to the AUTHORS file. - -### Code Review - -Everyone is invited to review and comment on pull requests. -If it looks fine to you, comment with "LGTM" (Looks good to me). - -If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. - -Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". - -## Development Ideas - -If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. 
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md deleted file mode 100644 index 2e9b07eeb..000000000 --- a/vendor/github.com/go-sql-driver/mysql/README.md +++ /dev/null @@ -1,490 +0,0 @@ -# Go-MySQL-Driver - -A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package - -![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") - ---------------------------------------- - * [Features](#features) - * [Requirements](#requirements) - * [Installation](#installation) - * [Usage](#usage) - * [DSN (Data Source Name)](#dsn-data-source-name) - * [Password](#password) - * [Protocol](#protocol) - * [Address](#address) - * [Parameters](#parameters) - * [Examples](#examples) - * [Connection pool and timeouts](#connection-pool-and-timeouts) - * [context.Context Support](#contextcontext-support) - * [ColumnType Support](#columntype-support) - * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) - * [time.Time support](#timetime-support) - * [Unicode support](#unicode-support) - * [Testing / Development](#testing--development) - * [License](#license) - ---------------------------------------- - -## Features - * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") - * Native Go implementation. No C-bindings, just pure Go - * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc) - * Automatic handling of broken connections - * Automatic Connection Pooling *(by database/sql package)* - * Supports queries larger than 16MB - * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support. - * Intelligent `LONG DATA` handling in prepared statements - * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support - * Optional `time.Time` parsing - * Optional placeholder interpolation - -## Requirements - * Go 1.7 or higher. We aim to support the 3 latest versions of Go. - * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) - ---------------------------------------- - -## Installation -Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell: -```bash -$ go get -u github.com/go-sql-driver/mysql -``` -Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`. - -## Usage -_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then. - -Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`: -```go -import "database/sql" -import _ "github.com/go-sql-driver/mysql" - -db, err := sql.Open("mysql", "user:password@/dbname") -``` - -[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples"). - - -### DSN (Data Source Name) - -The Data Source Name has a common format, like e.g. 
[PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets): -``` -[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] -``` - -A DSN in its fullest form: -``` -username:password@protocol(address)/dbname?param=value -``` - -Except for the databasename, all values are optional. So the minimal DSN is: -``` -/dbname -``` - -If you do not want to preselect a database, leave `dbname` empty: -``` -/ -``` -This has the same effect as an empty DSN string: -``` - -``` - -Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct. - -#### Password -Passwords can consist of any character. Escaping is **not** necessary. - -#### Protocol -See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available. -In general you should use an Unix domain socket if available and TCP otherwise for best performance. - -#### Address -For TCP and UDP networks, addresses have the form `host[:port]`. -If `port` is omitted, the default port will be used. -If `host` is a literal IPv6 address, it must be enclosed in square brackets. -The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. - -For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`. - -#### Parameters -*Parameters are case-sensitive!* - -Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`. - -##### `allowAllFiles` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files. -[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) - -##### `allowCleartextPasswords` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network. - -##### `allowNativePasswords` - -``` -Type: bool -Valid Values: true, false -Default: true -``` -`allowNativePasswords=false` disallows the usage of MySQL native password method. - -##### `allowOldPasswords` - -``` -Type: bool -Valid Values: true, false -Default: false -``` -`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords). - -##### `charset` - -``` -Type: string -Valid Values: -Default: none -``` - -Sets the charset used for client-server interaction (`"SET NAMES "`). 
If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). - -Usage of the `charset` parameter is discouraged because it issues additional queries to the server. -Unless you need the fallback behavior, please use `collation` instead. - -##### `collation` - -``` -Type: string -Valid Values: -Default: utf8_general_ci -``` - -Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail. - -A list of valid charsets for a server is retrievable with `SHOW COLLATION`. - -##### `clientFoundRows` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed. - -##### `columnsWithAlias` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example: - -``` -SELECT u.id FROM users as u -``` - -will return `u.id` instead of just `id` if `columnsWithAlias=true`. - -##### `interpolateParams` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`. - -*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!* - -##### `loc` - -``` -Type: string -Valid Values: -Default: UTC -``` - -Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details. - -Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter. - -Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`. - -##### `maxAllowedPacket` -``` -Type: decimal number -Default: 4194304 -``` - -Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*. - -##### `multiStatements` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded. 
- -When `multiStatements` is used, `?` parameters must only be used in the first statement. - -##### `parseTime` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string` -The date or datetime like `0000-00-00 00:00:00` is converted into zero value of `time.Time`. - - -##### `readTimeout` - -``` -Type: duration -Default: 0 -``` - -I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. - -##### `rejectReadOnly` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - - -`rejectReadOnly=true` causes the driver to reject read-only connections. This -is for a possible race condition during an automatic failover, where the mysql -client gets connected to a read-only replica after the failover. - -Note that this should be a fairly rare case, as an automatic failover normally -happens when the primary is down, and the race condition shouldn't happen -unless it comes back up online as soon as the failover is kicked off. On the -other hand, when this happens, a MySQL application can get stuck on a -read-only connection until restarted. It is however fairly easy to reproduce, -for example, using a manual failover on AWS Aurora's MySQL-compatible cluster. - -If you are not relying on read-only transactions to reject writes that aren't -supposed to happen, setting this on some MySQL providers (such as AWS Aurora) -is safer for failovers. - -Note that ERROR 1290 can be returned for a `read-only` server and this option will -cause a retry for that error. However the same error number is used for some -other cases. You should ensure your application will never cause an ERROR 1290 -except for `read-only` mode when enabling this option. - - -##### `serverPubKey` - -``` -Type: string -Valid Values: -Default: none -``` - -Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN. -Public keys are used to transmit encrypted data, e.g. for authentication. -If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required. - - -##### `timeout` - -``` -Type: duration -Default: OS default -``` - -Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. - - -##### `tls` - -``` -Type: bool / string -Valid Values: true, false, skip-verify, -Default: false -``` - -`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). - - -##### `writeTimeout` - -``` -Type: duration -Default: 0 -``` - -I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. - - -##### System Variables - -Any other parameters are interpreted as system variables: - * `=`: `SET =` - * `=`: `SET =` - * `=%27%27`: `SET =''` - -Rules: -* The values for string variables must be quoted with `'`. 
-* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed! - (which implies values of string variables must be wrapped with `%27`). - -Examples: - * `autocommit=1`: `SET autocommit=1` - * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'` - * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'` - - -#### Examples -``` -user@unix(/path/to/socket)/dbname -``` - -``` -root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local -``` - -``` -user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true -``` - -Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html): -``` -user:password@/dbname?sql_mode=TRADITIONAL -``` - -TCP via IPv6: -``` -user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci -``` - -TCP on a remote host, e.g. Amazon RDS: -``` -id:password@tcp(your-amazonaws-uri.com:3306)/dbname -``` - -Google Cloud SQL on App Engine (First Generation MySQL Server): -``` -user@cloudsql(project-id:instance-name)/dbname -``` - -Google Cloud SQL on App Engine (Second Generation MySQL Server): -``` -user@cloudsql(project-id:regionname:instance-name)/dbname -``` - -TCP using default port (3306) on localhost: -``` -user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped -``` - -Use the default protocol (tcp) and host (localhost:3306): -``` -user:password@/dbname -``` - -No Database preselected: -``` -user:password@/ -``` - - -### Connection pool and timeouts -The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively. - -## `ColumnType` Support -This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. - -## `context.Context` Support -Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts. -See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details. - - -### `LOAD DATA LOCAL INFILE` support -For this feature you need direct access to the package. Therefore you must change the import path (no `_`): -```go -import "github.com/go-sql-driver/mysql" -``` - -Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)). - -To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. 
Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore. - -See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details. - - -### `time.Time` support -The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program. - -However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter. - -**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes). - -Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`. - - -### Unicode support -Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default. - -Other collations / charsets can be set using the [`collation`](#collation) DSN parameter. - -Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default. - -See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support. - -## Testing / Development -To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. - -Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated. -If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). - -See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details. - ---------------------------------------- - -## License -Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) - -Mozilla summarizes the license scope as follows: -> MPL: The copyleft applies to any files containing MPLed code. - - -That means: - * You can **use** the **unchanged** source code both in private and commercially. - * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0). - * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**. - -Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license. - -You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE). 
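
Editor's note: the DSN parameters, pool notes, and `parseTime`/`loc`/`collation` options in this removed README are documented across several subsections. As a consolidated sketch only — not part of the original file, and assuming a local MySQL server with the vendored driver version — a typical connection wiring a few of those documented parameters together might look like this:

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
)

func main() {
	// parseTime=true scans DATE/DATETIME into time.Time, loc sets their
	// location, and collation avoids the extra SET NAMES round trip that
	// the charset parameter would issue.
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true&loc=Local&collation=utf8mb4_unicode_ci"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pool sizing and lifetime live on database/sql, not in the DSN.
	db.SetMaxOpenConns(10)
	db.SetMaxIdleConns(5)
	db.SetConnMaxLifetime(time.Minute)

	var now time.Time
	if err := db.QueryRow("SELECT NOW()").Scan(&now); err != nil {
		log.Fatal(err)
	}
	log.Println("server time:", now)
}
```
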
- -![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") - diff --git a/vendor/github.com/gobuffalo/envy/LICENSE.txt b/vendor/github.com/gobuffalo/envy/LICENSE.txt new file mode 100644 index 000000000..123ddc0d8 --- /dev/null +++ b/vendor/github.com/gobuffalo/envy/LICENSE.txt @@ -0,0 +1,8 @@ +The MIT License (MIT) +Copyright (c) 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/gobuffalo/envy/envy.go b/vendor/github.com/gobuffalo/envy/envy.go new file mode 100644 index 000000000..d3995f455 --- /dev/null +++ b/vendor/github.com/gobuffalo/envy/envy.go @@ -0,0 +1,268 @@ +/* +package envy makes working with ENV variables in Go trivial. + +* Get ENV variables with default values. +* Set ENV variables safely without affecting the underlying system. +* Temporarily change ENV vars; useful for testing. +* Map all of the key/values in the ENV. +* Loads .env files (by using [godotenv](https://github.com/joho/godotenv/)) +* More! +*/ +package envy + +import ( + "errors" + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "sync" + + "github.com/joho/godotenv" + "github.com/rogpeppe/go-internal/modfile" +) + +var gil = &sync.RWMutex{} +var env = map[string]string{} + +// GO111MODULE is ENV for turning mods on/off +const GO111MODULE = "GO111MODULE" + +func init() { + Load() + loadEnv() +} + +// Load the ENV variables to the env map +func loadEnv() { + gil.Lock() + defer gil.Unlock() + + if os.Getenv("GO_ENV") == "" { + // if the flag "test.v" is *defined*, we're running as a unit test. Note that we don't care + // about v.Value (verbose test mode); we just want to know if the test environment has defined + // it. It's also possible that the flags are not yet fully parsed (i.e. flag.Parsed() == false), + // so we could not depend on v.Value anyway. + // + if v := flag.Lookup("test.v"); v != nil { + env["GO_ENV"] = "test" + } + } + + // set the GOPATH if using >= 1.8 and the GOPATH isn't set + if os.Getenv("GOPATH") == "" { + out, err := exec.Command("go", "env", "GOPATH").Output() + if err == nil { + gp := strings.TrimSpace(string(out)) + os.Setenv("GOPATH", gp) + } + } + + for _, e := range os.Environ() { + pair := strings.Split(e, "=") + env[pair[0]] = os.Getenv(pair[0]) + } +} + +func Mods() bool { + return Get(GO111MODULE, "off") == "on" +} + +// Reload the ENV variables. 
Useful if +// an external ENV manager has been used +func Reload() { + env = map[string]string{} + loadEnv() +} + +// Load .env files. Files will be loaded in the same order that are received. +// Redefined vars will override previously existing values. +// IE: envy.Load(".env", "test_env/.env") will result in DIR=test_env +// If no arg passed, it will try to load a .env file. +func Load(files ...string) error { + + // If no files received, load the default one + if len(files) == 0 { + err := godotenv.Overload() + if err == nil { + Reload() + } + return err + } + + // We received a list of files + for _, file := range files { + + // Check if it exists or we can access + if _, err := os.Stat(file); err != nil { + // It does not exist or we can not access. + // Return and stop loading + return err + } + + // It exists and we have permission. Load it + if err := godotenv.Overload(file); err != nil { + return err + } + + // Reload the env so all new changes are noticed + Reload() + + } + return nil +} + +// Get a value from the ENV. If it doesn't exist the +// default value will be returned. +func Get(key string, value string) string { + gil.RLock() + defer gil.RUnlock() + if v, ok := env[key]; ok { + return v + } + return value +} + +// Get a value from the ENV. If it doesn't exist +// an error will be returned +func MustGet(key string) (string, error) { + gil.RLock() + defer gil.RUnlock() + if v, ok := env[key]; ok { + return v, nil + } + return "", fmt.Errorf("could not find ENV var with %s", key) +} + +// Set a value into the ENV. This is NOT permanent. It will +// only affect values accessed through envy. +func Set(key string, value string) { + gil.Lock() + defer gil.Unlock() + env[key] = value +} + +// MustSet the value into the underlying ENV, as well as envy. +// This may return an error if there is a problem setting the +// underlying ENV value. +func MustSet(key string, value string) error { + gil.Lock() + defer gil.Unlock() + err := os.Setenv(key, value) + if err != nil { + return err + } + env[key] = value + return nil +} + +// Map all of the keys/values set in envy. +func Map() map[string]string { + gil.RLock() + defer gil.RUnlock() + cp := map[string]string{} + for k, v := range env { + cp[k] = v + } + return env +} + +// Temp makes a copy of the values and allows operation on +// those values temporarily during the run of the function. +// At the end of the function run the copy is discarded and +// the original values are replaced. This is useful for testing. +// Warning: This function is NOT safe to use from a goroutine or +// from code which may access any Get or Set function from a goroutine +func Temp(f func()) { + oenv := env + env = map[string]string{} + for k, v := range oenv { + env[k] = v + } + defer func() { env = oenv }() + f() +} + +func GoPath() string { + return Get("GOPATH", "") +} + +func GoBin() string { + return Get("GO_BIN", "go") +} + +func InGoPath() bool { + pwd, _ := os.Getwd() + for _, p := range GoPaths() { + if strings.HasPrefix(pwd, p) { + return true + } + } + return false +} + +// GoPaths returns all possible GOPATHS that are set. 
+func GoPaths() []string { + gp := Get("GOPATH", "") + if runtime.GOOS == "windows" { + return strings.Split(gp, ";") // Windows uses a different separator + } + return strings.Split(gp, ":") +} + +func importPath(path string) string { + path = strings.TrimPrefix(path, "/private") + for _, gopath := range GoPaths() { + srcpath := filepath.Join(gopath, "src") + rel, err := filepath.Rel(srcpath, path) + if err == nil { + return filepath.ToSlash(rel) + } + } + + // fallback to trim + rel := strings.TrimPrefix(path, filepath.Join(GoPath(), "src")) + rel = strings.TrimPrefix(rel, string(filepath.Separator)) + return filepath.ToSlash(rel) +} + +// CurrentModule will attempt to return the module name from `go.mod` if +// modules are enabled. +// If modules are not enabled it will fallback to using CurrentPackage instead. +func CurrentModule() (string, error) { + if !Mods() { + return CurrentPackage(), nil + } + moddata, err := ioutil.ReadFile("go.mod") + if err != nil { + return "", errors.New("go.mod cannot be read or does not exist while go module is enabled") + } + packagePath := modfile.ModulePath(moddata) + if packagePath == "" { + return "", errors.New("go.mod is malformed") + } + return packagePath, nil +} + +// CurrentPackage attempts to figure out the current package name from the PWD +// Use CurrentModule for a more accurate package name. +func CurrentPackage() string { + if Mods() { + } + pwd, _ := os.Getwd() + return importPath(pwd) +} + +func Environ() []string { + gil.RLock() + defer gil.RUnlock() + var e []string + for k, v := range env { + e = append(e, fmt.Sprintf("%s=%s", k, v)) + } + return e +} diff --git a/vendor/github.com/gobuffalo/envy/version.go b/vendor/github.com/gobuffalo/envy/version.go new file mode 100644 index 000000000..0bff116a5 --- /dev/null +++ b/vendor/github.com/gobuffalo/envy/version.go @@ -0,0 +1,3 @@ +package envy + +const Version = "v1.6.15" diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS index b1abc4d30..1b4f6c208 100644 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -11,6 +11,7 @@ John Shahid John Tuley Laurent Patrick Lee +Peter Edge Roger Johansson Sam Nguyen Sergio Arbeo diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE index 7be0cc7b6..f57de90da 100644 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -1,7 +1,6 @@ -Protocol Buffers for Go with Gadgets - Copyright (c) 2013, The GoGo Authors. All rights reserved. -http://github.com/gogo/protobuf + +Protocol Buffers for Go with Gadgets Go support for Protocol Buffers - Google's data interchange format diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 41c717573..000000000 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go index 5d4cba4b5..a26b046d9 100644 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -35,22 +35,39 @@ package proto import ( + "fmt" "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) +func Clone(src Message) Message { + in := reflect.ValueOf(src) if in.IsNil() { - return pb + return src } - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) } // Merge merges src into dst. @@ -58,17 +75,24 @@ func Clone(pb Message) Message { // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil. 
func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) } if in.IsNil() { - // Merging nil into non-nil is a quiet no-op + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) return } mergeStruct(out.Elem(), in.Elem()) @@ -89,7 +113,7 @@ func mergeStruct(out, in reflect.Value) { bIn := emIn.GetExtensions() bOut := emOut.GetExtensions() *bOut = append(*bOut, *bIn...) - } else if emIn, ok := extendable(in.Addr().Interface()); ok { + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 000000000..24552483c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go index 737f2731d..63b0f08be 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. 
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -192,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } - // x -= 0x80 << 63 // Always zero. return 0, errOverflow @@ -267,9 +260,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -311,81 +301,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
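// Sketch, not part of the vendored file: reading raw wire data with the
// low-level Buffer decoders kept above. The byte values are made up.
package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
)

func main() {
	// A varint holding 300, followed by a length-delimited string "hi".
	buf := proto.NewBuffer([]byte{0xac, 0x02, 0x02, 'h', 'i'})

	n, err := buf.DecodeVarint()
	if err != nil {
		panic(err)
	}
	s, err := buf.DecodeStringBytes()
	if err != nil {
		panic(err)
	}
	fmt.Println(n, s) // 300 hi
}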
+type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -395,7 +333,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -405,8 +349,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -422,12 +374,17 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the @@ -438,541 +395,33 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 err := u.Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. 
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { - if isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - ext := e.GetExtensions() - *ext = append(*ext, o.buf[oi:o.index]...) - } - continue - } - } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. 
- return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). 
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. 
- for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). 
+ var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go deleted file mode 100644 index 6fb74de4c..000000000 --- a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go +++ /dev/null @@ -1,172 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -// Decode a reference to a struct pointer. -func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is a pointer receiver") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - bas := structPointer_FieldPointer(base, p.field) - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers ([]struct). -func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { - newBas := appendStructPointer(base, p.field, p.sstype) - - if is_group { - panic("not supported, maybe in future, if requested.") - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is not a pointer receiver.") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) - - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers. 
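// Sketch, not part of the vendored file: the Unmarshal / UnmarshalMerge
// distinction noted above. msg stands for any generated message type.
package protoexample

import proto "github.com/gogo/protobuf/proto"

func decodeTwice(data []byte, msg proto.Message) error {
	// Unmarshal resets msg before decoding.
	if err := proto.Unmarshal(data, msg); err != nil {
		return err
	}
	// UnmarshalMerge keeps what is already in msg and merges the new data in,
	// so repeated fields decoded here are appended to the ones decoded above.
	return proto.UnmarshalMerge(data, msg)
}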
-func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_ref_struct(p, false, base) -} - -func setPtrCustomType(base structPointer, f field, v interface{}) { - if v == nil { - return - } - structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) -} - -func setCustomType(base structPointer, f field, value interface{}) { - if value == nil { - return - } - v := reflect.ValueOf(value).Elem() - t := reflect.TypeOf(value).Elem() - kind := t.Kind() - switch kind { - case reflect.Slice: - slice := reflect.MakeSlice(t, v.Len(), v.Cap()) - reflect.Copy(slice, v) - oldHeader := structPointer_GetSliceHeader(base, f) - oldHeader.Data = slice.Pointer() - oldHeader.Len = v.Len() - oldHeader.Cap = v.Cap() - default: - size := reflect.TypeOf(value).Elem().Size() - structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) - } -} - -func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - setPtrCustomType(base, p.field, custom) - return nil -} - -func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - if custom != nil { - setCustomType(base, p.field, custom) - } - return nil -} - -// Decode a slice of bytes ([]byte) into a slice of custom types. -func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - newBas := appendStructPointer(base, p.field, p.ctype) - - var zero field - setCustomType(newBas, zero, custom) - - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 000000000..35b882c09 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 000000000..fe1bd7d90 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. 
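// Sketch, not part of the vendored file: dropping unknown fields after a
// decode, per the DiscardUnknown documentation above.
package protoexample

import proto "github.com/gogo/protobuf/proto"

func decodeAndDiscard(data []byte, msg proto.Message) error {
	if err := proto.Unmarshal(data, msg); err != nil {
		return err
	}
	// Clear unrecognized tags/values retained on msg so a later Marshal
	// will not re-emit them.
	proto.DiscardUnknown(msg)
	return nil
}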
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. 
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go index 18e2a5f77..e748e1730 100644 --- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -47,157 +47,3 @@ func (*duration) String() string { return "duration" } func init() { RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") } - -func (o *Buffer) decDuration() (time.Duration, error) { - b, err := o.DecodeRawBytes(true) - if err != nil { - return 0, err - } - dproto := &duration{} - if err := Unmarshal(b, dproto); err != nil { - return 0, err - } - return durationFromProto(dproto) -} - -func (o *Buffer) dec_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) - var zero field - setPtrCustomType(newBas, zero, &d) - return nil -} - -func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - structPointer_Word64Slice(base, p.field).Append(uint64(d)) - return nil -} - -func size_duration(p *Properties, base structPointer) (n int) { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_duration(p *Properties, base structPointer) error { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_ref_duration(p *Properties, base structPointer) (n int) { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - return nil -} - -func size_slice_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return 0 - } - dproto := durationProto(*durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return errRepeatedHasNil - } - dproto := durationProto(*durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go index 8b84d1b22..3abfed2cf 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -37,28 +37,9 @@ package proto import ( "errors" - "fmt" "reflect" - "sort" ) -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - var ( // errRepeatedHasNil is the error returned if Marshal is called with // a struct with a repeated field containing a nil element. @@ -82,10 +63,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -119,18 +96,27 @@ func (p *Buffer) EncodeVarint(x uint64) error { // SizeVarint returns the varint encoding size of an integer. 
func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 } - return n + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +135,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. @@ -165,20 +147,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -189,10 +163,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +172,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,319 +180,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? 
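// Sketch, not part of the vendored file: the low-level Buffer encoders shown
// above, paired with SizeVarint. The values are arbitrary.
package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)

	_ = b.EncodeVarint(300) // 0xac 0x02; proto.SizeVarint(300) == 2

	v := int64(-3)
	_ = b.EncodeZigzag64(uint64(v)) // zigzag(-3) == 5, a single byte on the wire

	_ = b.EncodeStringBytes("hi") // length-prefixed: 0x02 'h' 'i'

	fmt.Printf("% x  SizeVarint(300)=%d\n", b.Bytes(), proto.SizeVarint(300))
}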
- if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -538,825 +201,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) 
- return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) 
- return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? 
- - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. 
- // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. 
- reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go index 32111b7f4..0f5fb173e 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -3,11 +3,6 @@ // Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// http://github.com/golang/protobuf/ -// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -18,9 +13,6 @@ // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -36,315 +28,6 @@ package proto -import ( - "reflect" -) - func NewRequiredNotSetError(field string) *RequiredNotSetError { return &RequiredNotSetError{field} } - -type Sizer interface { - Size() int -} - -func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, s...) - return nil -} - -func size_ext_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return 0 - } - n += len(s) - return -} - -// Encode a reference to bool pointer. -func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - x := 0 - if v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_bool(p *Properties, base structPointer) int { - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode a reference to int32 pointer. -func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a reference to an int64 pointer. 
-func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_ref_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a reference to a string pointer. -func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_ref_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// Encode a reference to a message struct. -func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -//TODO this is only copied, please fix this -func size_ref_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a slice of references to message struct pointers ([]struct). -func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - } - return state.err -} - -//TODO this is only copied, please fix this -func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? 
- if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return ErrNil - } - custom := i.(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_custom_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { - custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceAt(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return ErrNil - } - slice := reflect.ValueOf(inter) - l := slice.Len() - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return 0 - } - slice := reflect.ValueOf(inter) - l := slice.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - } - return -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go index 2ed1cf596..d4db5a1c1 100644 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool { // set/unset mismatch return false } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2, sprop.Prop[i]) { @@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool { u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true + return bytes.Equal(u1, u2) } // v1 and v2 are known to have the same type. @@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { m1, m2 := e1.value, e2.value + if m1 == nil && m2 == nil { + // Both have only encoded form. 
+ if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + if m1 != nil && m2 != nil { // Both are unencoded. if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { @@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { desc = m[extNum] } if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue + return false } var err error if m1 == nil { diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go index 0dfcb538e..686bd2a09 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -38,6 +38,7 @@ package proto import ( "errors" "fmt" + "io" "reflect" "strconv" "sync" @@ -69,12 +70,6 @@ type extendableProtoV1 interface { ExtensionMap() map[int32]Extension } -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - // extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. type extensionAdapter struct { extendableProtoV1 @@ -97,14 +92,31 @@ func (n notLocker) Unlock() {} // extendable returns the extendableProto interface for the given generated proto message. // If the proto message has the old extension format, it returns a wrapper that implements // the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() } // XXX_InternalExtensions is an internal representation of proto extensions. @@ -149,16 +161,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc return e.p.extensionMap, &e.p.mu } -type extensionRange interface { - Message - ExtensionRangeArray() []ExtensionRange -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() -var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem() -var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem() - // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. type ExtensionDesc struct { @@ -198,8 +200,8 @@ func SetRawExtension(base Message, id int32, b []byte) { *ext = append(*ext, b...) 
return } - epb, ok := extendable(base) - if !ok { + epb, err := extendable(base) + if err != nil { return } extmap := epb.extensionsWrite() @@ -207,7 +209,7 @@ func SetRawExtension(base Message, id int32, b []byte) { } // isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extensionRange, field int32) bool { +func isExtensionField(pb extendableProto, field int32) bool { for _, er := range pb.ExtensionRangeArray() { if er.Start <= field && field <= er.End { return true @@ -223,8 +225,11 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { if ea, ok := pbi.(extensionAdapter); ok { pbi = ea.extendableProtoV1 } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) } // Check the range. if !isExtensionField(pb, extension.Field) { @@ -269,80 +274,6 @@ func extensionProperties(ed *ExtensionDesc) *Properties { return prop } -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - // HasExtension returns whether the given extension is present in pb. func HasExtension(pb Message, extension *ExtensionDesc) bool { if epb, doki := pb.(extensionsBytes); doki { @@ -366,8 +297,8 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { return false } // TODO: Check types, field numbers, etc.? 
- epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return false } extmap, mu := epb.extensionsRead() @@ -375,46 +306,26 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { return false } mu.Lock() - _, ok = extmap[extension.Field] + _, ok := extmap[extension.Field] mu.Unlock() return ok } -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} - // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { clearExtension(pb, extension.Field) } func clearExtension(pb Message, fieldNum int32) { - if epb, doki := pb.(extensionsBytes); doki { + if epb, ok := pb.(extensionsBytes); ok { offset := 0 for offset != -1 { offset = deleteExtension(epb, fieldNum, offset) } return } - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } // TODO: Check types, field numbers, etc.? @@ -422,39 +333,33 @@ func clearExtension(pb Message, fieldNum int32) { delete(extmap, fieldNum) } -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
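For context on the GetExtension contract described in the comment above, here is a minimal usage sketch. It assumes a hypothetical generated package mypb with an extension descriptor E_MyOption whose ExtensionType is *MyOption; none of these identifiers exist in this repository.

package main

import (
	"log"

	mypb "example.com/gen/mypb" // hypothetical generated package
	"github.com/gogo/protobuf/proto"
)

func readOption(msg proto.Message) {
	// With a type-complete descriptor GetExtension returns a value of
	// ExtensionDesc.ExtensionType; with an incomplete descriptor it returns
	// the raw encoded bytes, as documented above.
	v, err := proto.GetExtension(msg, mypb.E_MyOption)
	switch {
	case err == proto.ErrMissingExtension:
		log.Println("extension not set and no default declared")
	case err != nil:
		log.Printf("cannot read extension: %v", err)
	default:
		log.Printf("option: %v", v.(*mypb.MyOption)) // concrete type comes from the descriptor
	}
}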
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { if epb, doki := pb.(extensionsBytes); doki { ext := epb.GetExtensions() - o := 0 - for o < len(*ext) { - tag, n := DecodeVarint((*ext)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size((*ext)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - v, err := decodeExtension((*ext)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) + return decodeExtensionFromBytes(extension, *ext) } - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { + + epb, err := extendable(pb) + if err != nil { return nil, err } + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + emap, mu := epb.extensionsRead() if emap == nil { return defaultExtensionValue(extension) @@ -479,6 +384,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { return e.value, nil } + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err @@ -496,6 +406,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) @@ -530,31 +445,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) + unmarshal := typeUnmarshaler(t, extension.Tag) // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. + // Allocate space to store the pointer/slice. value := reflect.New(t).Elem() + var err error for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { return nil, err } - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { + if len(b) == 0 { break } } @@ -564,9 +476,13 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. 
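The extension helpers touched by this diff (the rewritten decodeExtension above, plus deleteExtension and decodeExtensionFromBytes in extensions_gogo.go) all walk the wire format by splitting each key varint into a field number (tag >> 3) and a wire type (tag & 7). The following self-contained sketch of that walk uses only exported helpers of the proto package; it is illustrative and not part of the vendored change.

package main

import (
	"fmt"
	"io"

	"github.com/gogo/protobuf/proto"
)

// listFields prints the field number and wire type of every top-level field
// in an encoded protocol buffer, skipping over the field payloads.
func listFields(buf []byte) error {
	for o := 0; o < len(buf); {
		tag, n := proto.DecodeVarint(buf[o:])
		if n == 0 {
			return io.ErrUnexpectedEOF
		}
		o += n
		fieldNum, wireType := int32(tag>>3), int(tag&0x7)
		fmt.Printf("field %d, wire type %d\n", fieldNum, wireType)

		switch wireType {
		case proto.WireVarint:
			_, m := proto.DecodeVarint(buf[o:])
			if m == 0 {
				return io.ErrUnexpectedEOF
			}
			o += m
		case proto.WireFixed64:
			o += 8
		case proto.WireFixed32:
			o += 4
		case proto.WireBytes:
			l, m := proto.DecodeVarint(buf[o:])
			if m == 0 {
				return io.ErrUnexpectedEOF
			}
			o += m + int(l)
		default:
			// Groups and unknown wire types are out of scope for this sketch.
			return fmt.Errorf("unhandled wire type %d", wireType)
		}
	}
	return nil
}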
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } extensions = make([]interface{}, len(es)) for i, e := range es { - extensions[i], err = GetExtension(pb, e) + extensions[i], err = GetExtension(epb, e) if err == ErrMissingExtension { err = nil } @@ -581,9 +497,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -610,30 +526,25 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, doki := pb.(extensionsBytes); doki { - ClearExtension(pb, extension) - ext := epb.GetExtensions() - et := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - p := NewBuffer(nil) - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { + if epb, ok := pb.(extensionsBytes); ok { + newb, err := encodeExtension(extension, value) + if err != nil { return err } - *ext = append(*ext, p.buf...) + bb := epb.GetExtensions() + *bb = append(*bb, newb...) return nil } - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") + return fmt.Errorf("proto: bad extension value type. 
got: %T, want: %T", value, extension.ExtensionType) } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension @@ -656,8 +567,8 @@ func ClearAllExtensions(pb Message) { *ext = []byte{} return } - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go index ea6478f00..53ebd8cca 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -32,12 +32,36 @@ import ( "bytes" "errors" "fmt" + "io" "reflect" "sort" "strings" "sync" ) +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { if reflect.ValueOf(pb).IsNil() { return ifnotset @@ -56,19 +80,28 @@ func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool } func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } return bytes.Equal(this.enc, that.enc) } func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } return bytes.Compare(this.enc, that.enc) } func SizeOfInternalExtension(m extendableProto) (n int) { - return SizeOfExtensionMap(m.extensionsWrite()) -} - -func SizeOfExtensionMap(m map[int32]Extension) (n int) { - return extensionsMapSize(m) + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) } type sortableMapElem struct { @@ -122,28 +155,26 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) } func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - if err := encodeExtensionsMap(m); err != nil { - return 0, err + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n } - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - for _, k := range keys { - n += copy(data[n:], m[int32(k)].enc) - } - return n, nil + return o, nil } func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - if m[id].value == nil || m[id].desc == nil { - return m[id].enc, nil - } - if err := encodeExtensionsMap(m); err != nil { + e := m[id] + if err := e.Encode(); err != nil { return nil, err } - return m[id].enc, nil + return e.enc, nil } func size(buf []byte, wire int) (int, error) { @@ -218,35 +249,58 @@ func AppendExtension(e Message, tag int32, buf []byte) { } } -func encodeExtension(e *Extension) error { - if e.value == nil || e.desc == nil { - // 
Extension is only in its encoded form. - return nil - } - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } } - e.enc = p.buf return nil } func (this Extension) GoString() string { - if this.enc == nil { - if err := encodeExtension(&this); err != nil { - panic(err) - } + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) } return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) } @@ -292,3 +346,23 @@ func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { pb := extendable.(extendableProto) return pb.extensionsWrite() } + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go index c98d73da4..d17f80209 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -273,6 +273,67 @@ import ( "sync" ) +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. 
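The RequiredNotSetError described in the comment above is treated as non-fatal by the marshaling code (see the nonFatal helper added just below): Marshal may still hand back the bytes it managed to encode alongside the error. A minimal sketch of how a caller could take advantage of that follows; msg stands for any generated message, and the partial-output behaviour is an assumption about this version of the library.

package main

import (
	"log"

	"github.com/gogo/protobuf/proto"
)

func encodeLenient(msg proto.Message) []byte {
	data, err := proto.Marshal(msg)
	if _, ok := err.(*proto.RequiredNotSetError); ok {
		// Required fields were missing; data may hold a partial encoding
		// that is still useful for logging or best-effort forwarding.
		log.Printf("partial encoding: %v", err)
		return data
	}
	if err != nil {
		log.Printf("marshal failed: %v", err)
		return nil
	}
	return data
}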
+type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + // Message is implemented by generated protocol buffer messages. type Message interface { Reset() @@ -280,26 +341,6 @@ type Message interface { ProtoMessage() } -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; @@ -309,16 +350,7 @@ type Buffer struct { buf []byte // encode/decode byte stream index int // read point - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 + deterministic bool } // NewBuffer allocates a new Buffer and initializes its internal data to @@ -343,6 +375,30 @@ func (p *Buffer) SetBuf(s []byte) { // Bytes returns the contents of the Buffer. func (p *Buffer) Bytes() []byte { return p.buf } +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. 
+// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + /* * Helper routines for simplifying the creation of optional fields of basic type. */ @@ -552,9 +608,11 @@ func SetDefaults(pb Message) { setDefaults(reflect.ValueOf(pb), true, false) } -// v is a pointer to a struct. +// v is a struct. func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() + if v.Kind() == reflect.Ptr { + v = v.Elem() + } defaultMu.RLock() dm, ok := defaults[v.Type()] @@ -656,8 +714,11 @@ func setDefaults(v reflect.Value, recur, zeros bool) { for _, ni := range dm.nested { f := v.Field(ni) - // f is *T or []*T or map[T]*T + // f is *T or T or []*T or []T switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + case reflect.Ptr: if f.IsNil() { continue @@ -667,7 +728,7 @@ func setDefaults(v reflect.Value, recur, zeros bool) { case reflect.Slice: for i := 0; i < f.Len(); i++ { e := f.Index(i) - if e.IsNil() { + if e.Kind() == reflect.Ptr && e.IsNil() { continue } setDefaults(e, recur, zeros) @@ -739,6 +800,9 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { var canHaveDefault bool switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + case reflect.Ptr: if ft.Elem().Kind() == reflect.Struct { nestedMessage = true @@ -748,7 +812,7 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes case reflect.Slice: switch ft.Elem().Kind() { - case reflect.Ptr: + case reflect.Ptr, reflect.Struct: nestedMessage = true // repeated message case reflect.Uint8: canHaveDefault = true // bytes field @@ -831,22 +895,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes return sf, false, nil } +// mapKeys returns a sort.Interface to be used for sorting the map keys. // Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } + s := mapKeySorter{vs: vs} - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. 
if len(vs) == 0 { return s } @@ -855,6 +909,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) } return s @@ -895,3 +955,13 @@ const GoGoProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const GoGoProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go index 4b4f7c909..b3aa39190 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -33,6 +33,14 @@ import ( "strconv" ) +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { s, ok := m[value] if !ok { diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go index fd982decd..f48a75676 100644 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -36,12 +36,7 @@ package proto */ import ( - "bytes" - "encoding/json" "errors" - "fmt" - "reflect" - "sort" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -94,10 +89,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item { } func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false + return ms.find(pb) != nil } func (ms *messageSet) Unmarshal(pb Message) error { @@ -147,50 +139,9 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() - case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err - } - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. 
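Illustrative sketch only, not part of the diff: the Sizer and ProtoSizer interfaces added to lib_gogo.go let a caller ask a generated message for its encoded size without marshaling, with proto.Size as the generic fallback (imports as in the sketch above).

// sizeOf prefers the gogo-generated ProtoSize/Size methods when a message
// implements them and falls back to proto.Size otherwise.
func sizeOf(m proto.Message) int {
	if s, ok := m.(proto.ProtoSizer); ok {
		return s.ProtoSize()
	}
	if s, ok := m.(proto.Sizer); ok {
		return s.Size()
	}
	return proto.Size(m)
}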
- msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -228,84 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. 
-func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go index fb512e2e1..b6cad9083 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} +const unsafeAllowed = false -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int @@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. 
-func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { v reflect.Value } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. 
-type word32 struct { - v reflect.Value +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} } -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { return p.v.IsNil() } -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() +// grow updates the slice s in place to make it one element longer. 
+// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() if n < m { - p.v.SetLen(n + 1) + s.SetLen(n + 1) } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) } -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) } - panic("unreachable") + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. 
-type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) return } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) } - panic("unreachable") + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return 
p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } -// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) } - panic("unreachable") + p.v.Elem().Set(s) } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func (p word64Slice) Len() int { - return p.v.Len() +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v } -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go index 1763a5f22..7ffd3c29d 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -26,7 +26,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. package proto @@ -34,52 +38,22 @@ import ( "reflect" ) -func structPointer_FieldPointer(p structPointer, f field) structPointer { - panic("not implemented") +// TODO: untested, so probably incorrect. 
+ +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} } -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - panic("not implemented") +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) } -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - panic("not implemented") -} - -func structPointer_Add(p structPointer, size field) structPointer { - panic("not implemented") -} - -func structPointer_Len(p structPointer, f field) int { - panic("not implemented") -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - panic("not implemented") -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - panic("not implemented") -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - panic("not implemented") -} - -type structRefSlice struct{} - -func (v *structRefSlice) Len() int { - panic("not implemented") -} - -func (v *structRefSlice) Index(i int) structPointer { - panic("not implemented") +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice } diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go index 6b5567d47..d55a335d9 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. +const unsafeAllowed = true -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. 
// In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr @@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. 
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) isNil() bool { + return p.p == nil } -// Get gets the value pointed at by p. 
-func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -func word64_IsNil(p word64) bool { - return *p == nil +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -func word64_Get(p word64) uint64 { - return **p +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// word64Val is like word32Val but for 64-bit values. 
-type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func word64Val_Get(p word64Val) uint64 { - return *p +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. 
+func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go index f156a29f0..aca8eed02 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -26,7 +26,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. 
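A side note on the build-tag change above (an inference from the tags themselves, not stated in the diff): the unsafe-based files now carry !purego while the reflect-based files carry purego, so consumers can force the slower, unsafe-free implementation with that tag, for example "go build -tags purego ./..."; App Engine and GopherJS builds continue to select the reflect implementation automatically, as before.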
@@ -37,92 +37,20 @@ import ( "unsafe" ) -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - return r.Interface() +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} } -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - if r.Elem().IsNil() { - return nil - } - return r.Elem().Interface() +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) } -func copyUintPtr(oldptr, newptr uintptr, size int) { - oldbytes := make([]byte, 0) - oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) - oldslice.Data = oldptr - oldslice.Len = size - oldslice.Cap = size - newbytes := make([]byte, 0) - newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) - newslice.Data = newptr - newslice.Len = size - newslice.Cap = size - copy(newbytes, oldbytes) -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - copyUintPtr(uintptr(oldptr), uintptr(newptr), size) -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - size := typ.Elem().Size() - - oldHeader := structPointer_GetSliceHeader(base, f) - oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() - newLen := oldHeader.Len + 1 - newSlice := reflect.MakeSlice(typ, newLen, newLen) - reflect.Copy(newSlice, oldSlice) - bas := toStructPointer(newSlice) - oldHeader.Data = uintptr(bas) - oldHeader.Len = newLen - oldHeader.Cap = newLen - - return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) -} - -func structPointer_FieldPointer(p structPointer, f field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_Add(p structPointer, size field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) -} - -func structPointer_Len(p structPointer, f field) int { - return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - return &structRefSlice{p: p, f: f, size: size} -} - -// A structRefSlice represents a slice of structs (themselves submessages or groups). 
-type structRefSlice struct { - p structPointer - f field - size uintptr -} - -func (v *structRefSlice) Len() int { - return structPointer_Len(v.p, v.f) -} - -func (v *structRefSlice) Index(i int) structPointer { - ss := structPointer_GetStructPointer(v.p, v.f) - ss1 := structPointer_GetRefStructPointer(ss, 0) - return structPointer_Add(ss1, field(uintptr(i)*v.size)) +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice } diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go index 2a69e8862..c9e5fa020 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -63,42 +63,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -145,13 +109,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. 
@@ -187,7 +144,7 @@ type Properties struct { Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only + proto3 bool // whether this is known to be a proto3 field oneof bool // whether this is a oneof field Default string // default value @@ -196,37 +153,21 @@ type Properties struct { CastType string StdTime bool StdDuration bool + WktPointer bool - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sstype reflect.Type // set for slices of structs types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -272,29 +213,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -309,6 +235,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -336,7 +263,7 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } case strings.HasPrefix(f, "embedded="): p.OrigName = strings.Split(f, "=")[1] @@ -348,301 +275,58 @@ func (p *Properties) Parse(s string) { p.StdTime = true case f == "stdduration": p.StdDuration = true + case f == "wktptr": + p.WktPointer = true } } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil +// setFieldProps initializes the field properties for submessages and maps. +func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { isMap := typ.Kind() == reflect.Map if len(p.CustomType) > 0 && !isMap { - p.setCustomEncAndDec(typ) + p.ctype = typ p.setTag(lockGetProp) return } if p.StdTime && !isMap { - p.setTimeEncAndDec(typ) p.setTag(lockGetProp) return } if p.StdDuration && !isMap { - p.setDurationEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { p.setTag(lockGetProp) return } switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - } else { - p.enc = (*Buffer).enc_ref_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_ref_bool - } - case reflect.Int32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - } else { - p.enc = (*Buffer).enc_ref_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_int32 - } - case reflect.Uint32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_ref_uint32 - } - case reflect.Int64, reflect.Uint64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.Float32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_uint32 - } - case reflect.Float64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.String: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - } else { - p.enc = (*Buffer).enc_ref_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_ref_string - } case reflect.Struct: p.stype = typ - p.isMarshaler = isMarshaler(typ) - p.isUnmarshaler = isUnmarshaler(typ) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_ref_struct_message - p.dec = (*Buffer).dec_ref_struct_message - p.size = size_ref_struct_message - } else { - fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) - } - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = 
(*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } - case reflect.Slice: switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string case reflect.Ptr: switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire 
== "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte + p.stype = t3 } case reflect.Struct: - p.setSliceOfNonPointerStructs(t1) + p.stype = t2 } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} vtype := p.mtype.Elem() if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { // The value type is not a message (*T) or bytes ([]byte), @@ -650,29 +334,16 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock vtype = reflect.PtrTo(vtype) } - p.mvalprop.CustomType = p.CustomType - p.mvalprop.StdDuration = p.StdDuration - p.mvalprop.StdTime = p.StdTime - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } p.setTag(lockGetProp) } func (p *Properties) setTag(lockGetProp bool) { - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) @@ -683,20 +354,9 @@ func (p *Properties) setTag(lockGetProp bool) { } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -706,14 +366,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" 
p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -734,9 +391,6 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { - if collectStats { - stats.Chit++ - } return sprop } @@ -749,24 +403,14 @@ func GetProperties(t reflect.Type) *StructProperties { // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } return prop } - if collectStats { - stats.Cmiss++ - } prop := new(StructProperties) // in case of recursive protos, fill this in now. propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) || - reflect.PtrTo(t).Implements(extendableBytesType) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -777,23 +421,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - if len(f.Tag.Get("protobuf")) > 0 { - p.enc = (*Buffer).enc_ext_slice_byte - p.dec = nil // not needed - p.size = size_ext_slice_byte - } else { - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { isOneofMessage = true @@ -809,9 +436,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. @@ -822,8 +446,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t + _, _, _, oots = om.XXX_OneofFuncs() // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) @@ -873,30 +496,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. 
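The GetProperties path in the hunk above keeps a type-keyed cache guarded by a sync.RWMutex: the read lock covers the common cache-hit path, and getPropertiesLocked inserts the entry into the map before building it so recursive message types terminate. A minimal sketch of that pattern, with hypothetical names rather than the library's internals, assuming only the standard library:

package main

import (
	"fmt"
	"reflect"
	"sync"
)

type props struct{ name string }

var (
	mu    sync.RWMutex
	cache = map[reflect.Type]*props{}
)

func getProps(t reflect.Type) *props {
	mu.RLock()
	p, ok := cache[t]
	mu.RUnlock()
	if ok {
		return p // fast path: read lock only
	}
	mu.Lock()
	defer mu.Unlock()
	if p, ok := cache[t]; ok { // re-check after acquiring the write lock
		return p
	}
	p = &props{name: t.String()}
	cache[t] = p // store before any recursive property building (mirrors the vendored code)
	return p
}

func main() {
	fmt.Println(getProps(reflect.TypeOf(struct{ A int }{})).name)
}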
@@ -925,20 +524,42 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } @@ -954,7 +575,14 @@ func MessageName(x Message) string { } // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. var ( diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go index b6b7176c5..40ea3dd93 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -29,83 +29,8 @@ package proto import ( - "fmt" - "os" "reflect" ) -func (p *Properties) setCustomEncAndDec(typ reflect.Type) { - p.ctype = typ - if p.Repeated { - p.enc = (*Buffer).enc_custom_slice_bytes - p.dec = (*Buffer).dec_custom_slice_bytes - p.size = size_custom_slice_bytes - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_custom_bytes - p.dec = (*Buffer).dec_custom_bytes - p.size = size_custom_bytes - } else { - p.enc = (*Buffer).enc_custom_ref_bytes - p.dec = (*Buffer).dec_custom_ref_bytes - p.size = size_custom_ref_bytes - } -} - -func (p *Properties) setDurationEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_duration - p.dec = (*Buffer).dec_slice_duration - p.size = size_slice_duration - } else { - p.enc = (*Buffer).enc_slice_ref_duration - p.dec = (*Buffer).dec_slice_ref_duration - p.size = size_slice_ref_duration - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_duration - p.dec = (*Buffer).dec_duration - p.size = size_duration - } else { - p.enc = (*Buffer).enc_ref_duration - p.dec = (*Buffer).dec_ref_duration - p.size = size_ref_duration - } -} - -func (p *Properties) setTimeEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_time - p.dec = (*Buffer).dec_slice_time - p.size = size_slice_time - } else { - p.enc = (*Buffer).enc_slice_ref_time - p.dec = (*Buffer).dec_slice_ref_time - p.size = size_slice_ref_time - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_time - p.dec = (*Buffer).dec_time - p.size = size_time - } else { - p.enc = (*Buffer).enc_ref_time - p.dec = (*Buffer).dec_ref_time - p.size = size_ref_time - } - -} - -func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { - t2 := typ.Elem() - p.sstype = typ - p.stype = t2 - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - p.enc = (*Buffer).enc_slice_ref_struct_message - p.dec = (*Buffer).dec_slice_ref_struct_message - p.size = size_slice_ref_struct_message - if p.Wire != "bytes" { - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) - } -} +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 000000000..9b1538d05 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3006 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements + + hassizer bool // has custom sizer + hasprotosizer bool // has custom protosizer + + bytesExtensions field // offset of XXX_extensions where the field type is []byte +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex + + uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). 
+func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + // Uses the message's Size method if available + if u.hassizer { + s := ptr.asPointerTo(u.typ).Interface().(Sizer) + return s.Size() + } + // Uses the message's ProtoSize method if available + if u.hasprotosizer { + s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) + return s.ProtoSize() + } + + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + n += len(s) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. 
+ // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. 
+ sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" + tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. 
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. +func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return 
makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + 
return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return 
sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + 
return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + 
v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ 
uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
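Illustrative aside, not from the vendored file: every embedded-message marshaler above emits the same length-delimited framing, i.e. a varint key (field number shifted left by three and OR'd with wire type 2), a varint byte length, then the submessage payload. A minimal standalone sketch of that framing using only the standard library; frameSubmessage and the sample bytes are assumed names and values:

package main

import (
	"encoding/binary"
	"fmt"
)

// frameSubmessage wraps body as field number `field` using wire type 2 (length-delimited).
func frameSubmessage(dst []byte, field int, body []byte) []byte {
	dst = binary.AppendUvarint(dst, uint64(field)<<3|2) // key = field<<3 | wiretype
	dst = binary.AppendUvarint(dst, uint64(len(body)))  // varint length prefix
	return append(dst, body...)                         // submessage bytes
}

func main() {
	inner := []byte{0x08, 0x2a} // a submessage whose field 1 is the varint 42
	fmt.Printf("%% x\n", frameSubmessage(nil, 1, inner)) // prints: 0a 02 08 2a
}
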
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. 
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
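Hedged illustration, assuming nothing beyond the standard library: makeMapMarshaler above encodes every map entry as its own nested message with the key as field 1 and the value as field 2 (hence keyWireTag and valWireTag), and then frames that nested message under the map field's own tag. encodeMapEntry and the example field number 4 are assumptions for illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeMapEntry encodes one map[int32]string entry as a protobuf map-entry message:
// field 1 = key (varint), field 2 = value (length-delimited).
func encodeMapEntry(key int32, val string) []byte {
	var e []byte
	e = binary.AppendUvarint(e, 1<<3|0) // key tag: field 1, wire type varint
	e = binary.AppendUvarint(e, uint64(key))
	e = binary.AppendUvarint(e, 2<<3|2) // value tag: field 2, wire type bytes
	e = binary.AppendUvarint(e, uint64(len(val)))
	return append(e, val...)
}

func main() {
	entry := encodeMapEntry(7, "hi")
	// The whole entry is then framed under the map field's own tag, e.g. field 4:
	var b []byte
	b = binary.AppendUvarint(b, 4<<3|2)
	b = binary.AppendUvarint(b, uint64(len(entry)))
	b = append(b, entry...)
	fmt.Printf("%% x\n", b) // prints: 22 06 08 07 12 02 68 69
}
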
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
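For orientation, a minimal sketch of the message-set item framing that sizeMessageSet and appendMessageSet above implement: each extension is wrapped in a group under field 1, carrying the extension's type_id as varint field 2 and its serialized payload as length-delimited field 3. appendMessageSetItem and the sample values are assumed, not part of the vendored code:

package main

import (
	"encoding/binary"
	"fmt"
)

// appendMessageSetItem frames one message-set item:
// start-group(1), type_id(2), message(3), end-group(1).
func appendMessageSetItem(b []byte, typeID uint64, msg []byte) []byte {
	b = append(b, 1<<3|3) // field 1, wire type 3 = start group
	b = append(b, 2<<3|0) // field 2, varint type_id
	b = binary.AppendUvarint(b, typeID)
	b = append(b, 3<<3|2) // field 3, length-delimited message payload
	b = binary.AppendUvarint(b, uint64(len(msg)))
	b = append(b, msg...)
	b = append(b, 1<<3|4) // field 1, wire type 4 = end group
	return b
}

func main() {
	fmt.Printf("%% x\n", appendMessageSetItem(nil, 100, []byte{0x08, 0x01}))
	// prints: 0b 10 64 1a 02 08 01 0c
}
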
+type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 000000000..997f57c1e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshal a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. 
+ if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 000000000..f520106e0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,657 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? 
+ out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 000000000..bb2622f28 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2245 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. + err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + if errLater == nil { + errLater = r + } + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. 
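Before the unrecognized bytes are stashed below, a standalone sketch (not part of the vendored file) of how the key read at the top of this loop splits into field tag and wire type, the x>>3 / x&7 split used above:

package main

import "fmt"

func main() {
	// Key 0x1a is 0b00011010: field tag 3, wire type 2 (length-delimited).
	key := uint64(0x1a)
	fmt.Println(key>>3, key&7) // 3 2
}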
+ z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." 
+ f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler + if fn.IsValid() && len(oneofFields) > 0 { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + } + } + + // Get extension ranges, if any. + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? 
+ for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. +func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return 
makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. 
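A quick illustration (hypothetical tag string, not part of the vendored file) of the protobuf struct tag format that typeUnmarshaler splits above: the first element selects the wire encoding, the second is the field number, and the elements from index 3 onward carry options such as name=, proto3, stdtime or stdduration.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical tag for a proto3 field, in the shape the generated code uses.
	tag := "varint,1,opt,name=id,proto3"
	parts := strings.Split(tag, ",")
	fmt.Println(parts[0]) // encoding: varint
	fmt.Println(parts[1]) // field number: 1
	for _, p := range parts[3:] {
		if strings.HasPrefix(p, "name=") {
			fmt.Println(p[5:]) // id
		}
	}
}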
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
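The unmarshalers that follow lean on two primitives: base-128 varint decoding and, for sintN fields, zigzag decoding. A self-contained sketch of both, assuming well-formed input (decodeVarintSketch and unzigzag64 are illustrative names, not part of the vendored file):

package main

import "fmt"

// decodeVarintSketch mirrors base-128 varint decoding: seven payload bits per
// byte, least-significant group first, high bit set on all but the last byte.
func decodeVarintSketch(b []byte) (x uint64, n int) {
	for i, c := range b {
		x |= uint64(c&0x7f) << (7 * uint(i))
		if c < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0 // truncated input
}

// unzigzag64 mirrors the expression int64(x>>1) ^ int64(x)<<63>>63 used by the
// sint64 unmarshalers: small negative numbers map back from small varints.
func unzigzag64(x uint64) int64 {
	return int64(x>>1) ^ int64(x)<<63>>63
}

func main() {
	v, n := decodeVarintSketch([]byte{0xac, 0x02})
	fmt.Println(v, n) // 300 2

	fmt.Println(unzigzag64(0), unzigzag64(1), unzigzag64(2), unzigzag64(3)) // 0 -1 1 -2
}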
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 
0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 000000000..00d6c7ad9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. 
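As context for the encodeVarint/decodeVarint pair above: it implements standard base-128 varints, seven payload bits per byte with the high bit marking continuation; decodeVarint is simply an unrolled, bounded version of the decode loop for speed. A minimal standalone sketch of the same scheme, assuming nothing beyond the standard library (helper names are illustrative, not part of this package, and the loop form does not cap the length at ten bytes the way the unrolled decoder does):

package main

import "fmt"

// encodeUvarint mirrors the base-128 scheme used by encodeVarint:
// low seven bits per byte, high bit set on every byte except the last.
func encodeUvarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

// decodeUvarint is the loop form of decodeVarint: it returns the value and
// the number of bytes consumed, or (0, 0) on truncated input.
func decodeUvarint(b []byte) (uint64, int) {
	var x uint64
	for i, c := range b {
		x |= uint64(c&0x7f) << (7 * uint(i))
		if c < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

func main() {
	buf := encodeUvarint(nil, 300)
	v, n := decodeUvarint(buf)
	fmt.Printf("% x -> %d (%d bytes)\n", buf, v, n) // ac 02 -> 300 (2 bytes)
}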
Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.New(sub.typ)) + m := s.Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := reflect.New(sub.typ) + c := m.Interface().(custom) + if err := c.Unmarshal(b[:x]); err != nil { + return nil, err + } + v := valToPointer(m) + f.appendRef(v, sub.typ) + return b[x:], nil + } +} + +func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + m := f.asPointerTo(sub.typ).Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 {
+ return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&d)) + return b[x:], nil + } +} + +func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(d)) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) +
slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go index f609d1d45..0407ba85d 100644 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -57,7 +57,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -177,11 +176,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. for _, ch := range u { @@ -276,6 +270,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -366,7 +364,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -383,7 +381,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.MapValProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -447,12 +445,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } if len(props.Enum) > 0 { if err := tm.writeEnum(w, fv, props); err != nil { @@ -475,7 +467,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { pv = reflect.New(sv.Type()) pv.Elem().Set(sv) } - if pv.Type().Implements(extensionRangeType) { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -484,27 +476,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. 
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -605,6 +576,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -613,8 +597,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go index f1276729a..1ce0be2fa 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -212,7 +212,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -283,60 +282,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. 
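The addressability comment added to writeAny above rests on a general reflect property: calling Value.Interface on an addressable struct value copies the whole struct, while calling it on the struct's address copies only the pointer, so text marshaling and the binary marshaler keep observing the same memory instead of racing on a copy. A small standalone illustration of that difference, with made-up types that are not part of this package:

package main

import (
	"fmt"
	"reflect"
)

type msg struct{ N int }

func main() {
	m := msg{N: 1}
	v := reflect.ValueOf(&m).Elem() // addressable struct value

	// Interface() on the struct value snapshots (copies) the struct.
	snapshot := v.Interface().(msg)
	m.N = 2
	fmt.Println(snapshot.N) // 1: the copy does not see later writes

	// Interface() on the address copies only the pointer, so the
	// original struct is shared, which is what writeAny wants when
	// v.CanAddr() holds.
	shared := v.Addr().Interface().(*msg)
	fmt.Println(shared.N) // 2: same underlying struct
}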
-func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. func (p *textParser) back() { p.backed = true } @@ -650,17 +636,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { if err := p.consumeToken(":"); err != nil { return err } - if err := p.readAny(key, props.mkeyprop); err != nil { + if err := p.readAny(key, props.MapKeyProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { return err } - if err := p.readAny(val, props.mvalprop); err != nil { + if err := p.readAny(val, props.MapValProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { @@ -734,6 +720,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -934,6 +923,16 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { fv.SetFloat(f) return nil } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } case reflect.Int32: if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { fv.SetInt(x) @@ -981,9 +980,19 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { } // TODO: Handle nested messages which implement encoding.TextUnmarshaler. return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: @@ -1001,13 +1010,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. 
func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go index d42764743..38439fa99 100644 --- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -47,183 +47,3 @@ func (*timestamp) String() string { return "timestamp" } func init() { RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") } - -func (o *Buffer) decTimestamp() (time.Time, error) { - b, err := o.DecodeRawBytes(true) - if err != nil { - return time.Time{}, err - } - tproto := &timestamp{} - if err := Unmarshal(b, tproto); err != nil { - return time.Time{}, err - } - return timestampFromProto(tproto) -} - -func (o *Buffer) dec_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - setPtrCustomType(base, p.field, &t) - return nil -} - -func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - setCustomType(base, p.field, &t) - return nil -} - -func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))) - var zero field - setPtrCustomType(newBas, zero, &t) - return nil -} - -func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType)) - var zero field - setCustomType(newBas, zero, &t) - return nil -} - -func size_time(p *Properties, base structPointer) (n int) { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - tim := structPointer_Interface(structp, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return 0 - } - size := Size(t) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_time(p *Properties, base structPointer) error { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - tim := structPointer_Interface(structp, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return err - } - data, err := Marshal(t) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_ref_time(p *Properties, base structPointer) (n int) { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return 0 - } - size := Size(t) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return err - } - data, err := Marshal(t) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data) - return nil -} - -func size_slice_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return 0 - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return errRepeatedHasNil - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 000000000..b175d1b64 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 000000000..c1cf7bf85 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README deleted file mode 100644 index 387b4eb68..000000000 --- a/vendor/github.com/golang/glog/README +++ /dev/null @@ -1,44 +0,0 @@ -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - https://github.com/google/glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. - -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. - - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go index cdfe2991f..eac1c7664 100644 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -25,7 +25,7 @@ type Cache struct { // an item is evicted. Zero means no limit. MaxEntries int - // OnEvicted optionally specificies a callback function to be + // OnEvicted optionally specifies a callback function to be // executed when an entry is purged from the cache. OnEvicted func(key Key, value interface{}) @@ -119,3 +119,15 @@ func (c *Cache) Len() int { } return c.ll.Len() } + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE index 1b1b1921e..0f646931a 100644 --- a/vendor/github.com/golang/protobuf/LICENSE +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -1,7 +1,4 @@ -Go support for Protocol Buffers - Google's data interchange format - Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index d9aa3c42d..63b0f08be 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } - // x -= 0x80 << 63 // Always zero. 
return 0, errOverflow diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 000000000..35b882c09 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index c27d35f86..3abfed2cf 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -37,27 +37,9 @@ package proto import ( "errors" - "fmt" "reflect" ) -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. 
-// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - var ( // errRepeatedHasNil is the error returned if Marshal is called with // a struct with a repeated field containing a nil element. diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index d4db5a1c1..f9b6e41b3 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { return false } - m1, m2 := e1.value, e2.value + m1 := extensionAsLegacyType(e1.value) + m2 := extensionAsLegacyType(e2.value) if m1 == nil && m2 == nil { // Both have only encoded form. diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index 816a3b9d6..fa88add30 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -185,9 +185,25 @@ type Extension struct { // extension will have only enc set. When such an extension is // accessed using GetExtension (or GetExtensions) desc and value // will be set. - desc *ExtensionDesc + desc *ExtensionDesc + + // value is a concrete value for the extension field. Let the type of + // desc.ExtensionType be the "API type" and the type of Extension.value + // be the "storage type". The API type and storage type are the same except: + // * For scalars (except []byte), the API type uses *T, + // while the storage type uses T. + // * For repeated fields, the API type uses []T, while the storage type + // uses *[]T. + // + // The reason for the divergence is so that the storage type more naturally + // matches what is expected of when retrieving the values through the + // protobuf reflection APIs. + // + // The value may only be populated if desc is also populated. value interface{} - enc []byte + + // enc is the raw bytes for the extension field. + enc []byte } // SetRawExtension is for testing only. @@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // descriptors with the same field number. return nil, errors.New("proto: descriptor conflict") } - return e.value, nil + return extensionAsLegacyType(e.value), nil } if extension.ExtensionType == nil { @@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // Remember the decoded version and drop the encoded version. // That way it is safe to mutate what we return. - e.value = v + e.value = extensionAsStorageType(v) e.desc = extension e.enc = nil emap[extension.Field] = e - return e.value, nil + return extensionAsLegacyType(e.value), nil } // defaultExtensionValue returns the default value for extension. @@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") + return fmt.Errorf("proto: bad extension value type. 
got: %T, want: %T", value, extension.ExtensionType) } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension @@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} + extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} return nil } @@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) { func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } + +// extensionAsLegacyType converts an value in the storage type as the API type. +// See Extension.value. +func extensionAsLegacyType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + // Represent primitive types as a pointer to the value. + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Slice: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + } + return v +} + +// extensionAsStorageType converts an value in the API type as the storage type. +// See Extension.value. +func extensionAsStorageType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + case reflect.Slice: + // Represent slice types as a pointer to the value. + if rv.Type().Elem().Kind() != reflect.Uint8 { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + } + return v +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 0e2191b8a..fdd328bb7 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -265,7 +265,6 @@ package proto import ( "encoding/json" - "errors" "fmt" "log" "reflect" @@ -274,7 +273,66 @@ import ( "sync" ) -var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. 
+type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} // Message is implemented by generated protocol buffer messages. type Message interface { @@ -283,26 +341,6 @@ type Message interface { ProtoMessage() } -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; @@ -902,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool { return false } -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion3 = true -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. 
+ ProtoPackageIsVersion2 = true + + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion1 = true +) // InternalMessageInfo is a type used internally by generated .pb.go files. // This type is not intended to be used by non-generated code. diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index 3b6ca41d5..f48a75676 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -36,13 +36,7 @@ package proto */ import ( - "bytes" - "encoding/json" "errors" - "fmt" - "reflect" - "sort" - "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - return marshalMessageSet(exts, false) -} - -// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. -func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var u marshalInfo - siz := u.sizeMessageSet(exts) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, exts, deterministic) - - case map[int32]Extension: - // This is an old-style extension map. - // Wrap it in a new-style XXX_InternalExtensions. - ie := XXX_InternalExtensions{ - p: &struct { - mu sync.Mutex - extensionMap map[int32]Extension - }{ - extensionMap: exts, - }, - } - - var u marshalInfo - siz := u.sizeMessageSet(&ie) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, &ie, deterministic) - - default: - return nil, errors.New("proto: not an extension map") - } -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { +func unmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var mu sync.Locker - m, mu = exts.extensionsRead() - if m != nil { - // Keep the extensions map locked until we're done marshaling to prevent - // races between marshaling and unmarshaling the lazily-{en,de}coded - // values. - mu.Lock() - defer mu.Unlock() - } - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. 
- ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - - if i > 0 && b.Len() > 1 { - b.WriteByte(',') - } - - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index b6cad9083..94fa9194a 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -79,10 +79,13 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { +func toAddrPointer(i *interface{}, isptr, deref bool) pointer { v := reflect.ValueOf(*i) u := reflect.New(v.Type()) u.Elem().Set(v) + if deref { + u = u.Elem() + } return pointer{v: u} } diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index d55a335d9..dbfffe071 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -85,16 +85,21 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { +func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { // Super-tricky - read or get the address of data word of interface value. if isptr { // The interface is of pointer type, thus it is a direct interface. // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } else { + // The interface is not of pointer type. The data word is the pointer + // to the data. + p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } - // The interface is not of pointer type. The data word is the pointer - // to the data. 
- return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + if deref { + p.p = *(*unsafe.Pointer)(p.p) + } + return p } // valToPointer converts v to a pointer. v must be of pointer type. diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index f710adab0..79668ff5c 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -139,7 +139,7 @@ type Properties struct { Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only + proto3 bool // whether this is known to be a proto3 field oneof bool // whether this is a oneof field Default string // default value @@ -148,9 +148,9 @@ type Properties struct { stype reflect.Type // set for struct types only sprop *StructProperties // set for struct types only - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only } // String formats the properties in the protobuf struct field tag style. @@ -275,16 +275,16 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc case reflect.Map: p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} vtype := p.mtype.Elem() if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { // The value type is not a message (*T) or bytes ([]byte), // so we need encoders for the pointer to this type. vtype = reflect.PtrTo(vtype) } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } if p.stype != nil { @@ -334,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { - if collectStats { - stats.Chit++ - } return sprop } @@ -346,17 +343,20 @@ func GetProperties(t reflect.Type) *StructProperties { return sprop } +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } return prop } - if collectStats { - stats.Cmiss++ - } prop := new(StructProperties) // in case of recursive protos, fill this in now. @@ -391,13 +391,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { // Re-order prop.order. 
sort.Sort(prop) - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - + if len(oots) > 0 { // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) for _, oot := range oots { diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go index 0f212b302..5cb11fa95 100644 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -87,6 +87,7 @@ type marshalElemInfo struct { sizer sizer marshaler marshaler isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) + deref bool // dereference the pointer before operating on it; implies isptr } var ( @@ -231,7 +232,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte return b, err } - var err, errreq error + var err, errLater error // The old marshaler encodes extensions at beginning. if u.extensions.IsValid() { e := ptr.offset(u.extensions).toExtensions() @@ -252,11 +253,13 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte } } for _, f := range u.fields { - if f.required && errreq == nil { + if f.required { if ptr.offset(f.field).getPointer().isNil() { // Required field is not set. // We record the error but keep going, to give a complete marshaling. - errreq = &RequiredNotSetError{f.name} + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } continue } } @@ -269,14 +272,21 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte if err1, ok := err.(*RequiredNotSetError); ok { // Required field in submessage is not set. // We record the error but keep going, to give a complete marshaling. - if errreq == nil { - errreq = &RequiredNotSetError{f.name + "." + err1.field} + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} } continue } if err == errRepeatedHasNil { err = errors.New("proto: repeated field " + f.name + " has nil element") } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } return b, err } } @@ -284,7 +294,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte s := *ptr.offset(u.unrecognized).toBytes() b = append(b, s...) } - return b, errreq + return b, errLater } // computeMarshalInfo initializes the marshal info. 
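The marshal hunks above replace errreq with errLater: a missing required field or an invalid-UTF-8 string is recorded but marshaling continues, in line with the nonFatal/isNonFatal helpers added to lib.go earlier in this diff, so the caller still receives complete output plus the first soft error. A minimal standalone sketch of that pattern (illustrative names, not the library's own code):

package main

import "fmt"

// requiredNotSet mirrors the shape of the library's RequiredNotSetError:
// a "soft" failure that should not abort marshaling. The name is illustrative.
type requiredNotSet struct{ field string }

func (e *requiredNotSet) Error() string        { return fmt.Sprintf("required field %q not set", e.field) }
func (e *requiredNotSet) RequiredNotSet() bool { return true }

// firstSoft keeps only the first non-fatal error: fatal errors stop
// processing, soft ones are recorded and work continues.
type firstSoft struct{ err error }

func (f *firstSoft) merge(err error) bool {
	if err == nil {
		return true
	}
	if nf, ok := err.(interface{ RequiredNotSet() bool }); !ok || !nf.RequiredNotSet() {
		return false // fatal: caller should stop and return err
	}
	if f.err == nil {
		f.err = err // remember the first soft error only
	}
	return true
}

func main() {
	var acc firstSoft
	for _, err := range []error{nil, &requiredNotSet{"name"}, &requiredNotSet{"id"}} {
		if !acc.merge(err) {
			fmt.Println("fatal:", err)
			return
		}
	}
	// All fields were processed; the first soft error is reported at the end.
	fmt.Println("marshaled with deferred error:", acc.err)
}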
@@ -311,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() { // get oneof implementers var oneofImplementers []interface{} - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() } n := t.NumField() @@ -398,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { panic("tag is not an integer") } wt := wiretype(tags[0]) + if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { + t = t.Elem() + } sizer, marshaler := typeMarshaler(t, tags, false, false) + var deref bool + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + t = reflect.PtrTo(t) + deref = true + } e = &marshalElemInfo{ wiretag: uint64(tag)<<3 | wt, tagsize: SizeVarint(uint64(tag) << 3), sizer: sizer, marshaler: marshaler, isptr: t.Kind() == reflect.Ptr, + deref: deref, } // update cache @@ -439,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { fi.field = toField(f) - fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. fi.isPointer = true fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) @@ -467,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI } } -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - // wiretype returns the wire encoding of the type. func wiretype(encoding string) uint64 { switch encoding { @@ -530,6 +548,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma packed := false proto3 := false + validateUTF8 := true for i := 2; i < len(tags); i++ { if tags[i] == "packed" { packed = true @@ -538,6 +557,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma proto3 = true } } + validateUTF8 = validateUTF8 && proto3 switch t.Kind() { case reflect.Bool: @@ -735,6 +755,18 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma } return sizeFloat64Value, appendFloat64Value case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } if pointer { return sizeStringPtr, appendStringPtr } @@ -1984,9 +2016,6 @@ func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byt } func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { v := *ptr.toString() - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) 
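Editor's note on the validateUTF8 branch above: for proto3 string fields the UTF-8 check now lives in the appendUTF8String* marshalers (added below), which append the bytes first and only then report the problem, and marshal wraps the result so the error names the offending field. A hedged usage sketch, with pb.Event standing in for any generated proto3 message that has a string field Note:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/hypothetical/pb" // hypothetical generated proto3 package
)

func main() {
	msg := &pb.Event{Note: string([]byte{0xff, 0xfe})} // not valid UTF-8
	data, err := proto.Marshal(msg)
	if err != nil {
		// With this change the error points at the field holding invalid
		// UTF-8, while data still contains the full wire encoding.
		log.Printf("marshaled %d bytes despite: %v", len(data), err)
	}
}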
@@ -1997,9 +2026,6 @@ func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]b if v == "" { return b, nil } - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) @@ -2011,24 +2037,83 @@ func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, err return b, nil } v := *p - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) return b, nil } func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool s := *ptr.toStringSlice() for _, v := range s { if !utf8.ValidString(v) { - return nil, errInvalidUTF8 + invalidUTF8 = true } b = appendVarint(b, wiretag) b = appendVarint(b, uint64(len(v))) b = append(b, v...) } + if invalidUTF8 { + return b, errInvalidUTF8 + } return b, nil } func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { @@ -2107,7 +2192,8 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { s := ptr.getPointerSlice() - var err, errreq error + var err error + var nerr nonFatal for _, v := range s { if v.isNil() { return b, errRepeatedHasNil @@ -2115,22 +2201,14 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { b = appendVarint(b, wiretag) // start group b, err = u.marshal(b, v, deterministic) b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = err - } - continue - } + if !nerr.Merge(err) { if err == ErrNil { err = errRepeatedHasNil } return b, err } } - return b, errreq + return b, nerr.E } } @@ -2174,7 +2252,8 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { s := ptr.getPointerSlice() - var err, errreq error + var err error + var nerr nonFatal for _, v := range s { if v.isNil() { return b, errRepeatedHasNil @@ -2184,22 +2263,14 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { b = appendVarint(b, uint64(siz)) b, err = u.marshal(b, v, deterministic) - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errreq == nil { - errreq = err - } - continue - } + if !nerr.Merge(err) { if err == ErrNil { err = errRepeatedHasNil } return b, err } } - return b, errreq + return b, nerr.E } } @@ -2223,14 +2294,33 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { // value. // Key cannot be pointer-typed. valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } return func(ptr pointer, tagsize int) int { m := ptr.asPointerTo(t).Elem() // the map n := 0 for _, k := range m.MapKeys() { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) n += siz + SizeVarint(uint64(siz)) + tagsize } @@ -2243,24 +2333,26 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { if len(keys) > 1 && deterministic { sort.Sort(mapKeys(keys)) } + + var nerr nonFatal for _, k := range keys { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) b = appendVarint(b, uint64(siz)) b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if err != nil { + if !nerr.Merge(err) { return b, err } b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != nil && err != ErrNil { // allow nil value in map + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map return b, err } } - return b, nil + return b, nerr.E } } @@ 
-2316,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { // the last time this function was called. ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, ei.tagsize) } mu.Unlock() @@ -2333,6 +2425,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de defer mu.Unlock() var err error + var nerr nonFatal // Fast-path for common cases: zero or one extensions. // Don't bother sorting the keys. @@ -2350,13 +2443,13 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if err != nil { + if !nerr.Merge(err) { return b, err } } - return b, nil + return b, nerr.E } // Sort the keys to provide a deterministic encoding. @@ -2381,13 +2474,13 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if err != nil { + if !nerr.Merge(err) { return b, err } } - return b, nil + return b, nerr.E } // message set format is: @@ -2426,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, 1) // message, tag = 3 (size=1) } mu.Unlock() @@ -2444,6 +2537,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de defer mu.Unlock() var err error + var nerr nonFatal // Fast-path for common cases: zero or one extensions. // Don't bother sorting the keys. @@ -2468,14 +2562,14 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if err != nil { + if !nerr.Merge(err) { return b, err } b = append(b, 1<<3|WireEndGroup) } - return b, nil + return b, nerr.E } // Sort the keys to provide a deterministic encoding. @@ -2506,14 +2600,14 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) b = append(b, 1<<3|WireEndGroup) - if err != nil { + if !nerr.Merge(err) { return b, err } } - return b, nil + return b, nerr.E } // sizeV1Extensions computes the size of encoded data for a V1-API extension field. 
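Editor's note: the nonFatal value threaded through the extension and message-set marshalers above is introduced elsewhere in this dependency bump and is not shown in these hunks. As an assumption-labeled sketch, it behaves roughly like the accumulator below: nil and "non-fatal" errors (missing required fields, and in the real code invalid UTF-8 strings) are remembered so marshaling can finish, while anything else stops marshaling immediately.

package sketch

import "github.com/golang/protobuf/proto"

// nonFatalSketch is an editor's illustration of the accumulator pattern used
// above; the real helper ships with the proto package itself.
type nonFatalSketch struct{ E error }

// Merge reports whether marshaling may continue after err. The first
// non-fatal error is kept in E and returned once the whole message is done.
func (nf *nonFatalSketch) Merge(err error) bool {
	if err == nil {
		return true // nothing to record
	}
	if !isNonFatalSketch(err) {
		return false // fatal: the caller stops and returns err as-is
	}
	if nf.E == nil {
		nf.E = err // remember only the first non-fatal error
	}
	return true
}

func isNonFatalSketch(err error) bool {
	// Missing required fields (and, in the real code, invalid UTF-8 strings)
	// are treated as non-fatal so a complete encoding can still be produced.
	_, ok := err.(*proto.RequiredNotSetError)
	return ok
}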
@@ -2536,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, ei.tagsize) } return n @@ -2556,6 +2650,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ sort.Ints(keys) var err error + var nerr nonFatal for _, k := range keys { e := m[int32(k)] if e.value == nil || e.desc == nil { @@ -2570,13 +2665,13 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if err != nil { + if !nerr.Merge(err) { return b, err } } - return b, nil + return b, nerr.E } // newMarshaler is the interface representing objects that can marshal themselves. diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go index 55f0340a3..acee2fc52 100644 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -97,6 +97,8 @@ type unmarshalFieldInfo struct { // if a required field, contains a single set bit at this field's index in the required field list. reqMask uint64 + + name string // name of the field, for error reporting } var ( @@ -134,10 +136,10 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { u.computeUnmarshalInfo() } if u.isMessageSet { - return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } - var reqMask uint64 // bitmask of required fields we've seen. - var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage. + var reqMask uint64 // bitmask of required fields we've seen. + var errLater error for len(b) > 0 { // Read tag and wire type. // Special case 1 and 2 byte varints. @@ -176,11 +178,20 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { if r, ok := err.(*RequiredNotSetError); ok { // Remember this error, but keep parsing. We need to produce // a full parse even if a required field is missing. - rnse = r + if errLater == nil { + errLater = r + } reqMask |= f.reqMask continue } if err != errInternalBadWireType { + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } return err } // Fragments with bad wire type are treated as unknown fields. @@ -239,20 +250,16 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { emap[int32(tag)] = e } } - if rnse != nil { - // A required field of a submessage/group is missing. Return that error. - return rnse - } - if reqMask != u.reqMask { + if reqMask != u.reqMask && errLater == nil { // A required field of this message is missing. for _, n := range u.reqFields { if reqMask&1 == 0 { - return &RequiredNotSetError{n} + errLater = &RequiredNotSetError{n} } reqMask >>= 1 } } - return nil + return errLater } // computeUnmarshalInfo fills in u with information for use @@ -351,43 +358,52 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { } // Store the info in the correct slot in the message. 
- u.setTag(tag, toField(&f), unmarshal, reqMask) + u.setTag(tag, toField(&f), unmarshal, reqMask, name) } // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") - if fn.IsValid() { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1] - tag, err := strconv.Atoi(tagstr) - if err != nil { - panic("protobuf tag field not an integer: " + tagstr) - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(tag, of.field, unmarshal, 0) - } + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break } } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + } // Get extension ranges, if any. - fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") if fn.IsValid() { if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { panic("a message with extensions, but no extensions field in " + t.Name()) @@ -401,7 +417,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0) + }, 0, "") // Set mask for required field check. u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? 
for len(u.dense) <= tag { @@ -442,11 +459,17 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { tagArray := strings.Split(tags, ",") encoding := tagArray[0] name := "unknown" + proto3 := false + validateUTF8 := true for _, tag := range tagArray[3:] { if strings.HasPrefix(tag, "name=") { name = tag[5:] } + if tag == "proto3" { + proto3 = true + } } + validateUTF8 = validateUTF8 && proto3 // Figure out packaging (pointer, slice, or both) slice := false @@ -594,6 +617,15 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { } return unmarshalBytesValue case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } if pointer { return unmarshalStringPtr } @@ -1448,9 +1480,6 @@ func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } *f.toString() = v return b[x:], nil } @@ -1468,9 +1497,6 @@ func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } *f.toStringPtr() = &v return b[x:], nil } @@ -1488,14 +1514,72 @@ func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { return nil, io.ErrUnexpectedEOF } v := string(b[:x]) - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } s := f.toStringSlice() *s = append(*s, v) return b[x:], nil } +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + var emptyBuf [0]byte func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { @@ -1674,6 +1758,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { // Maps will be somewhat slow. Oh well. // Read key and value from data. + var nerr nonFatal k := reflect.New(kt) v := reflect.New(vt) for len(b) > 0 { @@ -1694,7 +1779,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { err = errInternalBadWireType // skip unknown tag } - if err == nil { + if nerr.Merge(err) { continue } if err != errInternalBadWireType { @@ -1717,7 +1802,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { // Insert into map. 
m.SetMapIndex(k.Elem(), v.Elem()) - return r, nil + return r, nerr.E } } @@ -1743,15 +1828,16 @@ func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshal // Unmarshal data into holder. // We unmarshal into the first field of the holder object. var err error + var nerr nonFatal b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if err != nil { + if !nerr.Merge(err) { return nil, err } // Write pointer to holder into target field. f.asPointerTo(ityp).Elem().Set(v) - return b, nil + return b, nerr.E } } @@ -1864,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte { // If there is an error, it returns 0,0. func decodeVarint(b []byte) (uint64, int) { var x, y uint64 - if len(b) <= 0 { + if len(b) == 0 { goto bad } x = uint64(b[0]) diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 2205fdaad..1aaee725b 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -353,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -370,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.MapValProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 0685bae36..bb55a3af2 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -630,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { if err := p.consumeToken(":"); err != nil { return err } - if err := p.readAny(key, props.mkeyprop); err != nil { + if err := p.readAny(key, props.MapKeyProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { return err } - if err := p.readAny(val, props.mvalprop); err != nil { + if err := p.readAny(val, props.MapValProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index b2af97f4a..70276e8f5 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -130,10 +130,12 @@ func UnmarshalAny(any *any.Any, pb proto.Message) error { // Is returns true if any value contains a given message type. func Is(any *any.Any, pb proto.Message) bool { - aname, err := AnyMessageName(any) - if err != nil { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. 
+ if any == nil { return false } - - return aname == proto.MessageName(pb) + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index f67edc7dc..78ee52334 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto -package any // import "github.com/golang/protobuf/ptypes/any" +package any -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // `Any` contains an arbitrary serialized protocol buffer message along with a // URL that describes the type of the serialized message. @@ -99,17 +101,18 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // } // type Any struct { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: // // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the @@ -118,10 +121,14 @@ type Any struct { // on changes to types. (Use versioned type names to manage // breaking changes.) // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
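Editor's note on the ptypes.Is fast path just above: the rewritten check only compares the last path segment of TypeUrl against proto.MessageName, so it answers the same question as before without allocating or scanning for the slash. A hedged usage sketch, with pb.Person standing in for any generated message type:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	anypb "github.com/golang/protobuf/ptypes/any"
	pb "example.com/hypothetical/pb" // hypothetical generated package containing Person
)

func main() {
	person := &pb.Person{}
	a := &anypb.Any{
		TypeUrl: "type.googleapis.com/" + proto.MessageName(person),
	}
	// Is only inspects the suffix of TypeUrl after the last '/', so this
	// reports true without ever unmarshaling Value (left empty here).
	fmt.Println(ptypes.Is(a, person))
}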
// - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -133,17 +140,19 @@ func (m *Any) Reset() { *m = Any{} } func (m *Any) String() string { return proto.CompactTextString(m) } func (*Any) ProtoMessage() {} func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_any_744b9ca530f228db, []int{0} + return fileDescriptor_b53526c13ae22eb4, []int{0} } + func (*Any) XXX_WellKnownType() string { return "Any" } + func (m *Any) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Any.Unmarshal(m, b) } func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Any.Marshal(b, m, deterministic) } -func (dst *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(dst, src) +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) } func (m *Any) XXX_Size() int { return xxx_messageInfo_Any.Size(m) @@ -172,9 +181,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } -var fileDescriptor_any_744b9ca530f228db = []byte{ +var fileDescriptor_b53526c13ae22eb4 = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto deleted file mode 100644 index c74866762..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ /dev/null @@ -1,149 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. - // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: - // - // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." 
is - // not accepted). - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 65cb0f8eb..26d1ca2fb 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -82,7 +82,7 @@ func Duration(p *durpb.Duration) (time.Duration, error) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } if p.Nanos != 0 { - d += time.Duration(p.Nanos) + d += time.Duration(p.Nanos) * time.Nanosecond if (d < 0) != (p.Nanos < 0) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 4d75473b8..0d681ee21 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto -package duration // import "github.com/golang/protobuf/ptypes/duration" +package duration -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // A Duration represents a signed, fixed-length span of time represented // as a count of seconds and fractions of seconds at nanosecond @@ -82,14 +84,14 @@ type Duration struct { // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Signed fractions of a second at nanosecond resolution of the span // of time. Durations less than one second are represented with a 0 // `seconds` field and a positive or negative `nanos` field. For durations // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -99,17 +101,19 @@ func (m *Duration) Reset() { *m = Duration{} } func (m *Duration) String() string { return proto.CompactTextString(m) } func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_duration_e7d612259e3f0613, []int{0} + return fileDescriptor_23597b2ebd7ac6c5, []int{0} } + func (*Duration) XXX_WellKnownType() string { return "Duration" } + func (m *Duration) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Duration.Unmarshal(m, b) } func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Duration.Marshal(b, m, deterministic) } -func (dst *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(dst, src) +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) } func (m *Duration) XXX_Size() int { return xxx_messageInfo_Duration.Size(m) @@ -138,11 +142,9 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { - proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) -} +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } -var fileDescriptor_duration_e7d612259e3f0613 = []byte{ +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto deleted file mode 100644 index 975fce41a..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ /dev/null @@ -1,117 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/duration"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. 
For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 47f10dbc2..8da0df01a 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -111,11 +111,9 @@ func TimestampNow() *tspb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) ts := &tspb.Timestamp{ - Seconds: seconds, - Nanos: nanos, + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), } if err := validateTimestamp(ts); err != nil { return nil, err diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index e9c222282..31cd846de 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto -package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" +package timestamp -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // A Timestamp represents a point in time independent of any time zone // or calendar, represented as seconds and fractions of seconds at @@ -81,7 +83,9 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. @@ -92,20 +96,20 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) -// to obtain a formatter capable of generating timestamps in this format. 
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. // // type Timestamp struct { // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -115,17 +119,19 @@ func (m *Timestamp) Reset() { *m = Timestamp{} } func (m *Timestamp) String() string { return proto.CompactTextString(m) } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} + return fileDescriptor_292007bbfe81227e, []int{0} } + func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + func (m *Timestamp) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Timestamp.Unmarshal(m, b) } func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) } -func (dst *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(dst, src) +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) } func (m *Timestamp) XXX_Size() int { return xxx_messageInfo_Timestamp.Size(m) @@ -154,11 +160,9 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { - proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) -} +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } -var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ +var fileDescriptor_292007bbfe81227e = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto deleted file mode 100644 index 06750ab1f..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ /dev/null @@ -1,133 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. 
-// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) -// to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d97..000000000 --- a/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md deleted file mode 100644 index 6062a4dac..000000000 --- a/vendor/github.com/google/btree/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# BTree implementation for Go - -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - -This package provides an in-memory B-Tree implementation for Go, useful as -an ordered, mutable data structure. - -The API is based off of the wonderful -http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to -act as a drop-in replacement for gollrb trees. - -See http://godoc.org/github.com/google/btree for documentation. diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml deleted file mode 100644 index f8684d99f..000000000 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! 
go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - -script: - - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md deleted file mode 100644 index 51cf5cd1a..000000000 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Go makes it very simple to ensure properly formatted code, so always run - `go fmt` on your code before committing it. You should also run - [golint][] over your code. As noted in the [golint readme][], it's not - strictly necessary that your code be completely "lint-free", but this will - help you find common style issues. - - 1. Any significant changes should almost always be accompanied by tests. The - project already has good test coverage, so look at some of the existing - tests if you're unsure how to go about it. [gocov][] and [gocov-html][] - are invaluable tools for seeing which parts of your code aren't being - exercised by your tests. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. 
- -[forking]: https://help.github.com/articles/fork-a-repo -[golint]: https://github.com/golang/lint -[golint readme]: https://github.com/golang/lint/blob/master/README -[gocov]: https://github.com/axw/gocov -[gocov-html]: https://github.com/matm/gocov-html -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits -[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md deleted file mode 100644 index 64869af34..000000000 --- a/vendor/github.com/google/gofuzz/README.md +++ /dev/null @@ -1,71 +0,0 @@ -gofuzz -====== - -gofuzz is a library for populating go objects with random values. - -[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz) -[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) - -This is useful for testing: - -* Do your project's objects really serialize/unserialize correctly in all cases? -* Is there an incorrectly formatted object that will cause your project to panic? - -Import with ```import "github.com/google/gofuzz"``` - -You can use it on single variables: -```go -f := fuzz.New() -var myInt int -f.Fuzz(&myInt) // myInt gets a random value. -``` - -You can use it on maps: -```go -f := fuzz.New().NilChance(0).NumElements(1, 1) -var myMap map[ComplexKeyType]string -f.Fuzz(&myMap) // myMap will have exactly one element. -``` - -Customize the chance of getting a nil pointer: -```go -f := fuzz.New().NilChance(.5) -var fancyStruct struct { - A, B, C, D *string -} -f.Fuzz(&fancyStruct) // About half the pointers should be set. -``` - -You can even customize the randomization completely if needed: -```go -type MyEnum string -const ( - A MyEnum = "A" - B MyEnum = "B" -) -type MyInfo struct { - Type MyEnum - AInfo *string - BInfo *string -} - -f := fuzz.New().NilChance(0).Funcs( - func(e *MyInfo, c fuzz.Continue) { - switch c.Intn(2) { - case 0: - e.Type = A - c.Fuzz(&e.AInfo) - case 1: - e.Type = B - c.Fuzz(&e.BInfo) - } - }, -) - -var myObject MyInfo -f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. -``` - -See more examples in ```example_test.go```. - -Happy testing! diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index 4f888fbc8..1dfa80a6f 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -34,21 +34,27 @@ type Fuzzer struct { nilChance float64 minElements int maxElements int + maxDepth int } // New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, // RandSource, NilChance, or NumElements in any order. func New() *Fuzzer { + return NewWithSeed(time.Now().UnixNano()) +} + +func NewWithSeed(seed int64) *Fuzzer { f := &Fuzzer{ defaultFuzzFuncs: fuzzFuncMap{ reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), }, fuzzFuncs: fuzzFuncMap{}, - r: rand.New(rand.NewSource(time.Now().UnixNano())), + r: rand.New(rand.NewSource(seed)), nilChance: .2, minElements: 1, maxElements: 10, + maxDepth: 100, } return f } @@ -136,6 +142,14 @@ func (f *Fuzzer) genShouldFill() bool { return f.r.Float64() > f.nilChance } +// MaxDepth sets the maximum number of recursive fuzz calls that will be made +// before stopping. This includes struct members, pointers, and map and slice +// elements. 
+func (f *Fuzzer) MaxDepth(d int) *Fuzzer { + f.maxDepth = d + return f +} + // Fuzz recursively fills all of obj's fields with something random. First // this tries to find a custom fuzz function (see Funcs). If there is no // custom function this tests whether the object implements fuzz.Interface and, @@ -144,17 +158,19 @@ func (f *Fuzzer) genShouldFill() bool { // fails, this will generate random values for all primitive fields and then // recurse for all non-primitives. // -// Not safe for cyclic or tree-like structs! +// This is safe for cyclic or tree-like structs, up to a limit. Use the +// MaxDepth method to adjust how deep you need it to recurse. // -// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) -// Intended for tests, so will panic on bad input or unimplemented fields. +// obj must be a pointer. Only exported (public) fields can be set (thanks, +// golang :/ ) Intended for tests, so will panic on bad input or unimplemented +// fields. func (f *Fuzzer) Fuzz(obj interface{}) { v := reflect.ValueOf(obj) if v.Kind() != reflect.Ptr { panic("needed ptr!") } v = v.Elem() - f.doFuzz(v, 0) + f.fuzzWithContext(v, 0) } // FuzzNoCustom is just like Fuzz, except that any custom fuzz function for @@ -170,7 +186,7 @@ func (f *Fuzzer) FuzzNoCustom(obj interface{}) { panic("needed ptr!") } v = v.Elem() - f.doFuzz(v, flagNoCustomFuzz) + f.fuzzWithContext(v, flagNoCustomFuzz) } const ( @@ -178,69 +194,87 @@ const ( flagNoCustomFuzz uint64 = 1 << iota ) -func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) { +func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { + fc := &fuzzerContext{fuzzer: f} + fc.doFuzz(v, flags) +} + +// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer +// be thread-safe. +type fuzzerContext struct { + fuzzer *Fuzzer + curDepth int +} + +func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { + if fc.curDepth >= fc.fuzzer.maxDepth { + return + } + fc.curDepth++ + defer func() { fc.curDepth-- }() + if !v.CanSet() { return } if flags&flagNoCustomFuzz == 0 { // Check for both pointer and non-pointer custom functions. 
- if v.CanAddr() && f.tryCustom(v.Addr()) { + if v.CanAddr() && fc.tryCustom(v.Addr()) { return } - if f.tryCustom(v) { + if fc.tryCustom(v) { return } } if fn, ok := fillFuncMap[v.Kind()]; ok { - fn(v, f.r) + fn(v, fc.fuzzer.r) return } switch v.Kind() { case reflect.Map: - if f.genShouldFill() { + if fc.fuzzer.genShouldFill() { v.Set(reflect.MakeMap(v.Type())) - n := f.genElementCount() + n := fc.fuzzer.genElementCount() for i := 0; i < n; i++ { key := reflect.New(v.Type().Key()).Elem() - f.doFuzz(key, 0) + fc.doFuzz(key, 0) val := reflect.New(v.Type().Elem()).Elem() - f.doFuzz(val, 0) + fc.doFuzz(val, 0) v.SetMapIndex(key, val) } return } v.Set(reflect.Zero(v.Type())) case reflect.Ptr: - if f.genShouldFill() { + if fc.fuzzer.genShouldFill() { v.Set(reflect.New(v.Type().Elem())) - f.doFuzz(v.Elem(), 0) + fc.doFuzz(v.Elem(), 0) return } v.Set(reflect.Zero(v.Type())) case reflect.Slice: - if f.genShouldFill() { - n := f.genElementCount() + if fc.fuzzer.genShouldFill() { + n := fc.fuzzer.genElementCount() v.Set(reflect.MakeSlice(v.Type(), n, n)) for i := 0; i < n; i++ { - f.doFuzz(v.Index(i), 0) + fc.doFuzz(v.Index(i), 0) } return } v.Set(reflect.Zero(v.Type())) case reflect.Array: - if f.genShouldFill() { + if fc.fuzzer.genShouldFill() { n := v.Len() for i := 0; i < n; i++ { - f.doFuzz(v.Index(i), 0) + fc.doFuzz(v.Index(i), 0) } return } v.Set(reflect.Zero(v.Type())) case reflect.Struct: for i := 0; i < v.NumField(); i++ { - f.doFuzz(v.Field(i), 0) + fc.doFuzz(v.Field(i), 0) } case reflect.Chan: fallthrough @@ -255,20 +289,20 @@ func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) { // tryCustom searches for custom handlers, and returns true iff it finds a match // and successfully randomizes v. -func (f *Fuzzer) tryCustom(v reflect.Value) bool { +func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { // First: see if we have a fuzz function for it. - doCustom, ok := f.fuzzFuncs[v.Type()] + doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] if !ok { // Second: see if it can fuzz itself. if v.CanInterface() { intf := v.Interface() if fuzzable, ok := intf.(Interface); ok { - fuzzable.Fuzz(Continue{f: f, Rand: f.r}) + fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) return true } } // Finally: see if there is a default fuzz function. - doCustom, ok = f.defaultFuzzFuncs[v.Type()] + doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] if !ok { return false } @@ -294,8 +328,8 @@ func (f *Fuzzer) tryCustom(v reflect.Value) bool { } doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ - f: f, - Rand: f.r, + fc: fc, + Rand: fc.fuzzer.r, })}) return true } @@ -310,7 +344,7 @@ type Interface interface { // Continue can be passed to custom fuzzing functions to allow them to use // the correct source of randomness and to continue fuzzing their members. type Continue struct { - f *Fuzzer + fc *fuzzerContext // For convenience, Continue implements rand.Rand via embedding. // Use this for generating any randomness if you want your fuzzing @@ -325,7 +359,7 @@ func (c Continue) Fuzz(obj interface{}) { panic("needed ptr!") } v = v.Elem() - c.f.doFuzz(v, 0) + c.fc.doFuzz(v, 0) } // FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for @@ -338,7 +372,7 @@ func (c Continue) FuzzNoCustom(obj interface{}) { panic("needed ptr!") } v = v.Elem() - c.f.doFuzz(v, flagNoCustomFuzz) + c.fc.doFuzz(v, flagNoCustomFuzz) } // RandString makes a random string up to 20 characters long. 
The returned string diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60b..000000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f1..000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index 9d92c11f1..000000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). - -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod deleted file mode 100644 index fc84cd79d..000000000 --- a/vendor/github.com/google/uuid/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/google/uuid diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go index 0e32451a3..5351f36f3 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -7106,20 +7106,20 @@ func (m *Any) ToRawInfo() interface{} { func (m *ApiKeySecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] 
MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7130,14 +7130,14 @@ func (m *ApiKeySecurity) ToRawInfo() interface{} { func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7148,24 +7148,24 @@ func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { func (m *BodyParameter) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.Schema != nil { - info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) } // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7176,17 +7176,17 @@ func (m *BodyParameter) ToRawInfo() interface{} { func (m *Contact) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) } if m.Email != "" { - info = append(info, yaml.MapItem{"email", m.Email}) + info = append(info, yaml.MapItem{Key: "email", Value: m.Email}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7198,7 +7198,7 @@ func (m *Default) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties 
Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} @@ -7210,7 +7210,7 @@ func (m *Definitions) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} @@ -7221,41 +7221,41 @@ func (m *Definitions) ToRawInfo() interface{} { func (m *Document) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Swagger != "" { - info = append(info, yaml.MapItem{"swagger", m.Swagger}) + info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) } if m.Info != nil { - info = append(info, yaml.MapItem{"info", m.Info.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) } // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Host != "" { - info = append(info, yaml.MapItem{"host", m.Host}) + info = append(info, yaml.MapItem{Key: "host", Value: m.Host}) } if m.BasePath != "" { - info = append(info, yaml.MapItem{"basePath", m.BasePath}) + info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath}) } if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{"schemes", m.Schemes}) + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) } if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{"consumes", m.Consumes}) + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) } if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{"produces", m.Produces}) + info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) } if m.Paths != nil { - info = append(info, yaml.MapItem{"paths", m.Paths.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) } // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Definitions != nil { - info = append(info, yaml.MapItem{"definitions", m.Definitions.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()}) } // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Parameters != nil { - info = append(info, yaml.MapItem{"parameters", m.Parameters.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()}) } // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Responses != nil { - info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) } // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Security) != 0 { @@ -7263,11 +7263,11 @@ func (m *Document) ToRawInfo() interface{} { for _, item := range m.Security { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"security", items}) + info = append(info, yaml.MapItem{Key: "security", Value: items}) } // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if 
m.SecurityDefinitions != nil { - info = append(info, yaml.MapItem{"securityDefinitions", m.SecurityDefinitions.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()}) } // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Tags) != 0 { @@ -7275,16 +7275,16 @@ func (m *Document) ToRawInfo() interface{} { for _, item := range m.Tags { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"tags", items}) + info = append(info, yaml.MapItem{Key: "tags", Value: items}) } // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7296,7 +7296,7 @@ func (m *Examples) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} @@ -7307,14 +7307,14 @@ func (m *Examples) ToRawInfo() interface{} { func (m *ExternalDocs) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7325,38 +7325,38 @@ func (m *ExternalDocs) ToRawInfo() interface{} { func (m *FileSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: 
Implicit:false Description:} if len(m.Required) != 0 { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.ReadOnly != false { - info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Example != nil { - info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) } // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7367,81 +7367,81 @@ func (m *FileSchema) ToRawInfo() interface{} { func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, 
yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7452,69 +7452,69 @@ func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { func (m *Header) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any 
StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7525,78 +7525,78 @@ func (m *Header) ToRawInfo() interface{} { func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Type != "" { - info = 
append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ 
-7608,7 +7608,7 @@ func (m *Headers) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} @@ -7619,28 +7619,28 @@ func (m *Headers) ToRawInfo() interface{} { func (m *Info) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) } if m.Version != "" { - info = append(info, yaml.MapItem{"version", m.Version}) + info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.TermsOfService != "" { - info = append(info, yaml.MapItem{"termsOfService", m.TermsOfService}) + info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService}) } if m.Contact != nil { - info = append(info, yaml.MapItem{"contact", m.Contact.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()}) } // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.License != nil { - info = append(info, yaml.MapItem{"license", m.License.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()}) } // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7655,7 +7655,7 @@ func (m *ItemsItem) ToRawInfo() interface{} { for _, item := range m.Schema { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"schema", items}) + info = append(info, yaml.MapItem{Key: "schema", Value: items}) } // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} return info @@ -7665,10 +7665,10 @@ func (m *ItemsItem) ToRawInfo() interface{} { func (m *JsonReference) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } return info } @@ -7677,14 +7677,14 @@ func (m *JsonReference) ToRawInfo() interface{} { func (m *License) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, 
item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7695,7 +7695,7 @@ func (m *License) ToRawInfo() interface{} { func (m *NamedAny) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7705,7 +7705,7 @@ func (m *NamedAny) ToRawInfo() interface{} { func (m *NamedHeader) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7715,7 +7715,7 @@ func (m *NamedHeader) ToRawInfo() interface{} { func (m *NamedParameter) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7725,7 +7725,7 @@ func (m *NamedParameter) ToRawInfo() interface{} { func (m *NamedPathItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7735,7 +7735,7 @@ func (m *NamedPathItem) ToRawInfo() interface{} { func (m *NamedResponse) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7745,7 +7745,7 @@ func (m *NamedResponse) ToRawInfo() interface{} { func (m *NamedResponseValue) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7755,7 +7755,7 @@ func (m *NamedResponseValue) ToRawInfo() interface{} { func (m *NamedSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7765,7 +7765,7 @@ func (m *NamedSchema) ToRawInfo() interface{} { func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: 
Implicit:false Description:Mapped value} return info @@ -7775,10 +7775,10 @@ func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { func (m *NamedString) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Value != "" { - info = append(info, yaml.MapItem{"value", m.Value}) + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) } return info } @@ -7787,7 +7787,7 @@ func (m *NamedString) ToRawInfo() interface{} { func (m *NamedStringArray) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7824,27 +7824,27 @@ func (m *NonBodyParameter) ToRawInfo() interface{} { func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) } if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7855,24 +7855,24 @@ func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: 
"description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7883,24 +7883,24 @@ func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7911,24 +7911,24 @@ func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7946,56 +7946,56 @@ func (m *Oauth2Scopes) ToRawInfo() interface{} { func (m *Operation) ToRawInfo() interface{} { info := yaml.MapSlice{} if len(m.Tags) != 0 { - info = append(info, yaml.MapItem{"tags", m.Tags}) + info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags}) } if m.Summary != 
"" { - info = append(info, yaml.MapItem{"summary", m.Summary}) + info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.OperationId != "" { - info = append(info, yaml.MapItem{"operationId", m.OperationId}) + info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId}) } if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{"produces", m.Produces}) + info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) } if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{"consumes", m.Consumes}) + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) } if len(m.Parameters) != 0 { items := make([]interface{}, 0) for _, item := range m.Parameters { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"parameters", items}) + info = append(info, yaml.MapItem{Key: "parameters", Value: items}) } // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} if m.Responses != nil { - info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) } // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{"schemes", m.Schemes}) + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) } if m.Deprecated != false { - info = append(info, yaml.MapItem{"deprecated", m.Deprecated}) + info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated}) } if len(m.Security) != 0 { items := make([]interface{}, 0) for _, item := range m.Security { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"security", items}) + info = append(info, yaml.MapItem{Key: "security", Value: items}) } // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8024,7 +8024,7 @@ func (m *ParameterDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} @@ -8052,34 +8052,34 @@ func (m *ParametersItem) ToRawInfo() interface{} { func (m *PathItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if 
m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } if m.Get != nil { - info = append(info, yaml.MapItem{"get", m.Get.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()}) } // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Put != nil { - info = append(info, yaml.MapItem{"put", m.Put.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()}) } // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Post != nil { - info = append(info, yaml.MapItem{"post", m.Post.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()}) } // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Delete != nil { - info = append(info, yaml.MapItem{"delete", m.Delete.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()}) } // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Options != nil { - info = append(info, yaml.MapItem{"options", m.Options.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()}) } // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Head != nil { - info = append(info, yaml.MapItem{"head", m.Head.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()}) } // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Patch != nil { - info = append(info, yaml.MapItem{"patch", m.Patch.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()}) } // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Parameters) != 0 { @@ -8087,12 +8087,12 @@ func (m *PathItem) ToRawInfo() interface{} { for _, item := range m.Parameters { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"parameters", items}) + info = append(info, yaml.MapItem{Key: "parameters", Value: items}) } // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8103,78 +8103,78 @@ func (m *PathItem) ToRawInfo() interface{} { func (m *PathParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, 
yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // 
&{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8186,13 +8186,13 @@ func (m *Paths) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} if m.Path != nil { for _, item := range m.Path { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} @@ -8203,66 +8203,66 @@ func (m *Paths) ToRawInfo() interface{} { func (m *PrimitivesItems) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", 
Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8274,7 +8274,7 @@ func (m *Properties) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} @@ -8285,81 +8285,81 @@ func (m *Properties) ToRawInfo() interface{} { func (m *QueryParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = 
append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8370,23 +8370,23 @@ func (m *QueryParameterSubSchema) ToRawInfo() interface{} { func (m *Response) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Schema != nil { - info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) } // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Headers != nil { - info = append(info, yaml.MapItem{"headers", m.Headers.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()}) } // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Examples != nil { - info = append(info, yaml.MapItem{"examples", m.Examples.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "examples", Value: 
m.Examples.ToRawInfo()}) } // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8398,7 +8398,7 @@ func (m *ResponseDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} @@ -8427,13 +8427,13 @@ func (m *Responses) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.ResponseCode != nil { for _, item := range m.ResponseCode { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8444,80 +8444,80 @@ func (m *Responses) ToRawInfo() interface{} { func (m *Schema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, 
yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if m.MaxProperties != 0 { - info = append(info, yaml.MapItem{"maxProperties", m.MaxProperties}) + info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties}) } if m.MinProperties != 0 { - info = append(info, yaml.MapItem{"minProperties", m.MinProperties}) + info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties}) } if len(m.Required) != 0 { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.AdditionalProperties != nil { - info = append(info, yaml.MapItem{"additionalProperties", m.AdditionalProperties.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()}) } // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Type != nil { if len(m.Type.Value) == 1 { - info = append(info, yaml.MapItem{"type", m.Type.Value[0]}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]}) } else { - info = append(info, yaml.MapItem{"type", m.Type.Value}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value}) } } // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -8526,7 +8526,7 @@ func (m *Schema) ToRawInfo() interface{} { for _, item := range m.Items.Schema { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"items", items[0]}) + info = append(info, yaml.MapItem{Key: "items", Value: items[0]}) } // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.AllOf) != 0 { @@ -8534,34 +8534,34 @@ func (m *Schema) ToRawInfo() interface{} { for _, item := range m.AllOf { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"allOf", items}) + info = append(info, yaml.MapItem{Key: "allOf", Value: items}) } // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} 
if m.Properties != nil { - info = append(info, yaml.MapItem{"properties", m.Properties.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()}) } // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Discriminator != "" { - info = append(info, yaml.MapItem{"discriminator", m.Discriminator}) + info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator}) } if m.ReadOnly != false { - info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) } if m.Xml != nil { - info = append(info, yaml.MapItem{"xml", m.Xml.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()}) } // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Example != nil { - info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) } // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8590,7 +8590,7 @@ func (m *SecurityDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} @@ -8639,7 +8639,7 @@ func (m *SecurityRequirement) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} @@ -8655,18 +8655,18 @@ func (m *StringArray) ToRawInfo() interface{} { func (m *Tag) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs 
StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8677,7 +8677,7 @@ func (m *Tag) ToRawInfo() interface{} { func (m *TypeItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if len(m.Value) != 0 { - info = append(info, yaml.MapItem{"value", m.Value}) + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) } return info } @@ -8687,7 +8687,7 @@ func (m *VendorExtension) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} @@ -8698,23 +8698,23 @@ func (m *VendorExtension) ToRawInfo() interface{} { func (m *Xml) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Namespace != "" { - info = append(info, yaml.MapItem{"namespace", m.Namespace}) + info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace}) } if m.Prefix != "" { - info = append(info, yaml.MapItem{"prefix", m.Prefix}) + info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix}) } if m.Attribute != false { - info = append(info, yaml.MapItem{"attribute", m.Attribute}) + info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute}) } if m.Wrapped != false { - info = append(info, yaml.MapItem{"wrapped", m.Wrapped}) + info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go index 37da7df25..a030fa676 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: OpenAPIv2/OpenAPIv2.proto -// DO NOT EDIT! /* Package openapi_v2 is a generated protocol buffer package. 
@@ -4257,7 +4256,7 @@ func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 3129 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto deleted file mode 100644 index 557c88072..000000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto +++ /dev/null @@ -1,663 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -syntax = "proto3"; - -package openapi.v2; - -import "google/protobuf/any.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "OpenAPIProto"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.openapi_v2"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -option objc_class_prefix = "OAS"; - -message AdditionalPropertiesItem { - oneof oneof { - Schema schema = 1; - bool boolean = 2; - } -} - -message Any { - google.protobuf.Any value = 1; - string yaml = 2; -} - -message ApiKeySecurity { - string type = 1; - string name = 2; - string in = 3; - string description = 4; - repeated NamedAny vendor_extension = 5; -} - -message BasicAuthenticationSecurity { - string type = 1; - string description = 2; - repeated NamedAny vendor_extension = 3; -} - -message BodyParameter { - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 1; - // The name of the parameter. - string name = 2; - // Determines the location of the parameter. 
- string in = 3; - // Determines whether or not this parameter is required or optional. - bool required = 4; - Schema schema = 5; - repeated NamedAny vendor_extension = 6; -} - -// Contact information for the owners of the API. -message Contact { - // The identifying name of the contact person/organization. - string name = 1; - // The URL pointing to the contact information. - string url = 2; - // The email address of the contact person/organization. - string email = 3; - repeated NamedAny vendor_extension = 4; -} - -message Default { - repeated NamedAny additional_properties = 1; -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. -message Definitions { - repeated NamedSchema additional_properties = 1; -} - -message Document { - // The Swagger version of this document. - string swagger = 1; - Info info = 2; - // The host (name or ip) of the API. Example: 'swagger.io' - string host = 3; - // The base path to the API. Example: '/api'. - string base_path = 4; - // The transfer protocol of the API. - repeated string schemes = 5; - // A list of MIME types accepted by the API. - repeated string consumes = 6; - // A list of MIME types the API can produce. - repeated string produces = 7; - Paths paths = 8; - Definitions definitions = 9; - ParameterDefinitions parameters = 10; - ResponseDefinitions responses = 11; - repeated SecurityRequirement security = 12; - SecurityDefinitions security_definitions = 13; - repeated Tag tags = 14; - ExternalDocs external_docs = 15; - repeated NamedAny vendor_extension = 16; -} - -message Examples { - repeated NamedAny additional_properties = 1; -} - -// information about external documentation -message ExternalDocs { - string description = 1; - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// A deterministic version of a JSON Schema object. -message FileSchema { - string format = 1; - string title = 2; - string description = 3; - Any default = 4; - repeated string required = 5; - string type = 6; - bool read_only = 7; - ExternalDocs external_docs = 8; - Any example = 9; - repeated NamedAny vendor_extension = 10; -} - -message FormDataParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. 
- bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Header { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - string description = 18; - repeated NamedAny vendor_extension = 19; -} - -message HeaderParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -message Headers { - repeated NamedHeader additional_properties = 1; -} - -// General information about the API. -message Info { - // A unique and precise title of the API. - string title = 1; - // A semantic version number of the API. - string version = 2; - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - string description = 3; - // The terms of service for the API. - string terms_of_service = 4; - Contact contact = 5; - License license = 6; - repeated NamedAny vendor_extension = 7; -} - -message ItemsItem { - repeated Schema schema = 1; -} - -message JsonReference { - string _ref = 1; - string description = 2; -} - -message License { - // The name of the license type. It's encouraged to use an OSI compatible license. - string name = 1; - // The URL pointing to the license. - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -message NamedAny { - // Map key - string name = 1; - // Mapped value - Any value = 2; -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -message NamedHeader { - // Map key - string name = 1; - // Mapped value - Header value = 2; -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -message NamedParameter { - // Map key - string name = 1; - // Mapped value - Parameter value = 2; -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-message NamedPathItem { - // Map key - string name = 1; - // Mapped value - PathItem value = 2; -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -message NamedResponse { - // Map key - string name = 1; - // Mapped value - Response value = 2; -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. -message NamedResponseValue { - // Map key - string name = 1; - // Mapped value - ResponseValue value = 2; -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -message NamedSchema { - // Map key - string name = 1; - // Mapped value - Schema value = 2; -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. -message NamedSecurityDefinitionsItem { - // Map key - string name = 1; - // Mapped value - SecurityDefinitionsItem value = 2; -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -message NamedString { - // Map key - string name = 1; - // Mapped value - string value = 2; -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. -message NamedStringArray { - // Map key - string name = 1; - // Mapped value - StringArray value = 2; -} - -message NonBodyParameter { - oneof oneof { - HeaderParameterSubSchema header_parameter_sub_schema = 1; - FormDataParameterSubSchema form_data_parameter_sub_schema = 2; - QueryParameterSubSchema query_parameter_sub_schema = 3; - PathParameterSubSchema path_parameter_sub_schema = 4; - } -} - -message Oauth2AccessCodeSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string token_url = 5; - string description = 6; - repeated NamedAny vendor_extension = 7; -} - -message Oauth2ApplicationSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2ImplicitSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2PasswordSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2Scopes { - repeated NamedString additional_properties = 1; -} - -message Operation { - repeated string tags = 1; - // A brief summary of the operation. - string summary = 2; - // A longer description of the operation, GitHub Flavored Markdown is allowed. - string description = 3; - ExternalDocs external_docs = 4; - // A unique identifier of the operation. - string operation_id = 5; - // A list of MIME types the API can produce. - repeated string produces = 6; - // A list of MIME types the API can consume. - repeated string consumes = 7; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 8; - Responses responses = 9; - // The transfer protocol of the API. 
- repeated string schemes = 10; - bool deprecated = 11; - repeated SecurityRequirement security = 12; - repeated NamedAny vendor_extension = 13; -} - -message Parameter { - oneof oneof { - BodyParameter body_parameter = 1; - NonBodyParameter non_body_parameter = 2; - } -} - -// One or more JSON representations for parameters -message ParameterDefinitions { - repeated NamedParameter additional_properties = 1; -} - -message ParametersItem { - oneof oneof { - Parameter parameter = 1; - JsonReference json_reference = 2; - } -} - -message PathItem { - string _ref = 1; - Operation get = 2; - Operation put = 3; - Operation post = 4; - Operation delete = 5; - Operation options = 6; - Operation head = 7; - Operation patch = 8; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 9; - repeated NamedAny vendor_extension = 10; -} - -message PathParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. -message Paths { - repeated NamedAny vendor_extension = 1; - repeated NamedPathItem path = 2; -} - -message PrimitivesItems { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - repeated NamedAny vendor_extension = 18; -} - -message Properties { - repeated NamedSchema additional_properties = 1; -} - -message QueryParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. 
- bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Response { - string description = 1; - SchemaItem schema = 2; - Headers headers = 3; - Examples examples = 4; - repeated NamedAny vendor_extension = 5; -} - -// One or more JSON representations for parameters -message ResponseDefinitions { - repeated NamedResponse additional_properties = 1; -} - -message ResponseValue { - oneof oneof { - Response response = 1; - JsonReference json_reference = 2; - } -} - -// Response objects names can either be any valid HTTP status code or 'default'. -message Responses { - repeated NamedResponseValue response_code = 1; - repeated NamedAny vendor_extension = 2; -} - -// A deterministic version of a JSON Schema object. -message Schema { - string _ref = 1; - string format = 2; - string title = 3; - string description = 4; - Any default = 5; - double multiple_of = 6; - double maximum = 7; - bool exclusive_maximum = 8; - double minimum = 9; - bool exclusive_minimum = 10; - int64 max_length = 11; - int64 min_length = 12; - string pattern = 13; - int64 max_items = 14; - int64 min_items = 15; - bool unique_items = 16; - int64 max_properties = 17; - int64 min_properties = 18; - repeated string required = 19; - repeated Any enum = 20; - AdditionalPropertiesItem additional_properties = 21; - TypeItem type = 22; - ItemsItem items = 23; - repeated Schema all_of = 24; - Properties properties = 25; - string discriminator = 26; - bool read_only = 27; - Xml xml = 28; - ExternalDocs external_docs = 29; - Any example = 30; - repeated NamedAny vendor_extension = 31; -} - -message SchemaItem { - oneof oneof { - Schema schema = 1; - FileSchema file_schema = 2; - } -} - -message SecurityDefinitions { - repeated NamedSecurityDefinitionsItem additional_properties = 1; -} - -message SecurityDefinitionsItem { - oneof oneof { - BasicAuthenticationSecurity basic_authentication_security = 1; - ApiKeySecurity api_key_security = 2; - Oauth2ImplicitSecurity oauth2_implicit_security = 3; - Oauth2PasswordSecurity oauth2_password_security = 4; - Oauth2ApplicationSecurity oauth2_application_security = 5; - Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - } -} - -message SecurityRequirement { - repeated NamedStringArray additional_properties = 1; -} - -message StringArray { - repeated string value = 1; -} - -message Tag { - string name = 1; - string description = 2; - ExternalDocs external_docs = 3; - repeated NamedAny vendor_extension = 4; -} - -message TypeItem { - repeated string value = 1; -} - -// Any property starting with x- is valid. 
-message VendorExtension { - repeated NamedAny additional_properties = 1; -} - -message Xml { - string name = 1; - string namespace = 2; - string prefix = 3; - bool attribute = 4; - bool wrapped = 5; - repeated NamedAny vendor_extension = 6; -} - diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md deleted file mode 100644 index 836fb32a7..000000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# OpenAPI v2 Protocol Buffer Models - -This directory contains a Protocol Buffer-language model -and related code for supporting OpenAPI v2. - -Gnostic applications and plugins can use OpenAPIv2.proto -to generate Protocol Buffer support code for their preferred languages. - -OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI -descriptions into the Protocol Buffer-based datastructures -generated from OpenAPIv2.proto. - -OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic -compiler generator, and OpenAPIv2.pb.go is generated by -protoc, the Protocol Buffer compiler, and protoc-gen-go, the -Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json deleted file mode 100644 index 2815a26ea..000000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json +++ /dev/null @@ -1,1610 +0,0 @@ -{ - "title": "A JSON Schema for Swagger 2.0 API.", - "id": "http://swagger.io/v2/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "required": [ - "swagger", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "swagger": { - "type": "string", - "enum": [ - "2.0" - ], - "description": "The Swagger version of this document." - }, - "info": { - "$ref": "#/definitions/info" - }, - "host": { - "type": "string", - "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", - "description": "The host (name or ip) of the API. Example: 'swagger.io'" - }, - "basePath": { - "type": "string", - "pattern": "^/", - "description": "The base path to the API. Example: '/api'." 
- }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "consumes": { - "description": "A list of MIME types accepted by the API.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "definitions": { - "$ref": "#/definitions/definitions" - }, - "parameters": { - "$ref": "#/definitions/parameterDefinitions" - }, - "responses": { - "$ref": "#/definitions/responseDefinitions" - }, - "security": { - "$ref": "#/definitions/security" - }, - "securityDefinitions": { - "$ref": "#/definitions/securityDefinitions" - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "General information about the API.", - "required": [ - "version", - "title" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "title": { - "type": "string", - "description": "A unique and precise title of the API." - }, - "version": { - "type": "string", - "description": "A semantic version number of the API." - }, - "description": { - "type": "string", - "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." - }, - "termsOfService": { - "type": "string", - "description": "The terms of service for the API." - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the owners of the API.", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The identifying name of the contact person/organization." - }, - "url": { - "type": "string", - "description": "The URL pointing to the contact information.", - "format": "uri" - }, - "email": { - "type": "string", - "description": "The email address of the contact person/organization.", - "format": "email" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "license": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The name of the license type. It's encouraged to use an OSI compatible license." - }, - "url": { - "type": "string", - "description": "The URL pointing to the license.", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "paths": { - "type": "object", - "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - }, - "^/": { - "$ref": "#/definitions/pathItem" - } - }, - "additionalProperties": false - }, - "definitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "description": "One or more JSON objects describing the schemas being consumed and produced by the API." 
- }, - "parameterDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameter" - }, - "description": "One or more JSON representations for parameters" - }, - "responseDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/response" - }, - "description": "One or more JSON representations for parameters" - }, - "externalDocs": { - "type": "object", - "additionalProperties": false, - "description": "information about external documentation", - "required": [ - "url" - ], - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "examples": { - "type": "object", - "additionalProperties": true - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the HTTP message." - }, - "operation": { - "type": "object", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string", - "description": "A brief summary of the operation." - }, - "description": { - "type": "string", - "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string", - "description": "A unique identifier of the operation." - }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "consumes": { - "description": "A list of MIME types the API can consume.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "parameters": { - "$ref": "#/definitions/parametersList" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "deprecated": { - "type": "boolean", - "default": false - }, - "security": { - "$ref": "#/definitions/security" - } - } - }, - "pathItem": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "parameters": { - "$ref": "#/definitions/parametersList" - } - } - }, - "responses": { - "type": "object", - "description": "Response objects names can either be any valid HTTP status code or 'default'.", - "minProperties": 1, - "additionalProperties": false, - "patternProperties": { - "^([0-9]{3})$|^(default)$": { - "$ref": "#/definitions/responseValue" - }, - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "not": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - } - }, - "responseValue": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "response": { - 
"type": "object", - "required": [ - "description" - ], - "properties": { - "description": { - "type": "string" - }, - "schema": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/fileSchema" - } - ] - }, - "headers": { - "$ref": "#/definitions/headers" - }, - "examples": { - "$ref": "#/definitions/examples" - } - }, - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "headers": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/header" - } - }, - "header": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "vendorExtension": { - "description": "Any property starting with x- is valid.", - "additionalProperties": true, - "additionalItems": true - }, - "bodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "schema" - ], - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "body" - ] - }, - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "schema": { - "$ref": "#/definitions/schema" - } - }, - "additionalProperties": false - }, - "headerParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "header" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "queryParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "query" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "formDataParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "formData" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array", - "file" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "pathParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "required" - ], - "properties": { - "required": { - "type": "boolean", - "enum": [ - true - ], - "description": "Determines whether or not this parameter is required or optional." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "path" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "nonBodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "type" - ], - "oneOf": [ - { - "$ref": "#/definitions/headerParameterSubSchema" - }, - { - "$ref": "#/definitions/formDataParameterSubSchema" - }, - { - "$ref": "#/definitions/queryParameterSubSchema" - }, - { - "$ref": "#/definitions/pathParameterSubSchema" - } - ] - }, - "parameter": { - "oneOf": [ - { - "$ref": "#/definitions/bodyParameter" - }, - { - "$ref": "#/definitions/nonBodyParameter" - } - ] - }, - "schema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "enum": { - "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "boolean" - } - ], - "default": {} - }, - "type": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/type" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - } - ], - "default": {} - }, - "allOf": { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "default": {} - }, - "discriminator": { - "type": "string" - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "fileSchema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "type" - ], - "properties": { - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "type": { - "type": "string", - "enum": [ - "file" - ] - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "primitivesItems": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "securityRequirement": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "xml": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - 
"prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean", - "default": false - }, - "wrapped": { - "type": "boolean", - "default": false - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "tag": { - "type": "object", - "additionalProperties": false, - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "securityDefinitions": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/basicAuthenticationSecurity" - }, - { - "$ref": "#/definitions/apiKeySecurity" - }, - { - "$ref": "#/definitions/oauth2ImplicitSecurity" - }, - { - "$ref": "#/definitions/oauth2PasswordSecurity" - }, - { - "$ref": "#/definitions/oauth2ApplicationSecurity" - }, - { - "$ref": "#/definitions/oauth2AccessCodeSecurity" - } - ] - } - }, - "basicAuthenticationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "basic" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "apiKeySecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "name", - "in" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "apiKey" - ] - }, - "name": { - "type": "string" - }, - "in": { - "type": "string", - "enum": [ - "header", - "query" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ImplicitSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "implicit" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2PasswordSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "password" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ApplicationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "application" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2AccessCodeSecurity": { - "type": "object", - 
"additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "accessCode" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2Scopes": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "mediaTypeList": { - "type": "array", - "items": { - "$ref": "#/definitions/mimeType" - }, - "uniqueItems": true - }, - "parametersList": { - "type": "array", - "description": "The parameters needed to send a valid API call.", - "additionalItems": false, - "items": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "uniqueItems": true - }, - "schemesList": { - "type": "array", - "description": "The transfer protocol of the API.", - "items": { - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss" - ] - }, - "uniqueItems": true - }, - "collectionFormat": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes" - ], - "default": "csv" - }, - "collectionFormatWithMulti": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes", - "multi" - ], - "default": "csv" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "jsonReference": { - "type": "object", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "description": { - "type": "string" - } - } - } - } -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md deleted file mode 100644 index 848b16c69..000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Compiler support code - -This 
directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go index 604a46a6a..c954a2d9b 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -15,6 +15,7 @@ package compiler import ( + "errors" "fmt" "gopkg.in/yaml.v2" "io/ioutil" @@ -53,11 +54,16 @@ func FetchFile(fileurl string) ([]byte, error) { } return bytes, nil } - log.Printf("Fetching %s", fileurl) + if verboseReader { + log.Printf("Fetching %s", fileurl) + } response, err := http.Get(fileurl) if err != nil { return nil, err } + if response.StatusCode != 200 { + return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) + } defer response.Body.Close() bytes, err = ioutil.ReadAll(response.Body) if err == nil { @@ -104,7 +110,9 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { if err != nil { return nil, err } - infoCache[filename] = info + if len(filename) > 0 { + infoCache[filename] = info + } return info, nil } diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh deleted file mode 100755 index 68d02a02a..000000000 --- a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh +++ /dev/null @@ -1,5 +0,0 @@ -go get github.com/golang/protobuf/protoc-gen-go - -protoc \ ---go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto - diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md deleted file mode 100644 index ff1c2eb1e..000000000 --- a/vendor/github.com/googleapis/gnostic/extensions/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Extensions - -This directory contains support code for building Gnostic extensions and associated examples. - -Extensions are used to compile vendor or specification extensions into protocol buffer structures. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go index b14f1f945..749ff7841 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: extension.proto -// DO NOT EDIT! /* Package openapiextension_v1 is a generated protocol buffer package. @@ -78,7 +77,7 @@ func (m *Version) GetSuffix() string { // An encoded Request is written to the ExtensionHandler's stdin. type ExtensionHandlerRequest struct { // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to openapic. + // The specifications will appear in the order they are specified to gnostic. Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` // The version number of openapi compiler. 
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` @@ -192,28 +191,28 @@ func init() { func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, - 0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, - 0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, - 0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, - 0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4, - 0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4, - 0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0, - 0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34, - 0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a, - 0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a, - 0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66, - 0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2, - 0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d, - 0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3, - 0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c, - 0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa, - 0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab, - 0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3, - 0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62, - 0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23, - 0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1, - 0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52, - 0x02, 0x00, 0x00, + // 357 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40, + 0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64, + 0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46, + 0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7, + 0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22, + 0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11, + 0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18, + 0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8, + 0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c, + 0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69, + 0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 
0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef, + 0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3, + 0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87, + 0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44, + 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 0xd0, 0x4a, 0x16, 0x03, 0x80, + 0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4, + 0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54, + 0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c, + 0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e, + 0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2, + 0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa, + 0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b, + 0x80, 0x51, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto deleted file mode 100644 index 806760a13..000000000 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.proto +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -import "google/protobuf/any.proto"; -package openapiextension.v1; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "OpenAPIExtensionV1"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.openapic.v1"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -// -option objc_class_prefix = "OAE"; // "OpenAPI Extension" - -// The version number of OpenAPI compiler. -message Version { - int32 major = 1; - int32 minor = 2; - int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". 
It should - // be empty for mainline stable releases. - string suffix = 4; -} - -// An encoded Request is written to the ExtensionHandler's stdin. -message ExtensionHandlerRequest { - - // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to openapic. - Wrapper wrapper = 1; - - // The version number of openapi compiler. - Version compiler_version = 3; -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -message ExtensionHandlerResponse { - - // true if the extension is handled by the extension handler; false otherwise - bool handled = 1; - - // Error message. If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. - repeated string error = 2; - - // text output - google.protobuf.Any value = 3; -} - -message Wrapper { - // version of the OpenAPI specification in which this extension was written. - string version = 1; - - // Name of the extension - string extension_name = 2; - - // Must be a valid yaml for the proto - string yaml = 3; -} diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS new file mode 100644 index 000000000..b722392ee --- /dev/null +++ b/vendor/github.com/gorilla/mux/AUTHORS @@ -0,0 +1,8 @@ +# This is the official list of gorilla/mux authors for copyright purposes. +# +# Please keep the list sorted. + +Google LLC (https://opensource.google.com/) +Kamil Kisielk +Matt Silverlock +Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE new file mode 100644 index 000000000..6903df638 --- /dev/null +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/mux/context.go b/vendor/github.com/gorilla/mux/context.go new file mode 100644 index 000000000..665940a26 --- /dev/null +++ b/vendor/github.com/gorilla/mux/context.go @@ -0,0 +1,18 @@ +package mux + +import ( + "context" + "net/http" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return r.Context().Value(key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + return r.WithContext(context.WithValue(r.Context(), key, val)) +} diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go new file mode 100644 index 000000000..38957deea --- /dev/null +++ b/vendor/github.com/gorilla/mux/doc.go @@ -0,0 +1,306 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts, paths and query values can have variables with an optional + regular expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +Groups can be used inside patterns, as long as they are non-capturing (?:re). 
For example: + + r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +Note that if any capturing groups are present, mux will panic() during parsing. To prevent +this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to +"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably +when capturing groups were present. + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.example.com". + r.Host("www.example.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. To match path prefixes: + + r.PathPrefix("/products/") + +...or HTTP methods: + + r.Methods("GET", "POST") + +...or URL schemes: + + r.Schemes("https") + +...or header values: + + r.Headers("X-Requested-With", "XMLHttpRequest") + +...or query values: + + r.Queries("key", "value") + +...or to use a custom matcher function: + + r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 + }) + +...and finally, it is possible to combine several matchers in a single route: + + r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). + Schemes("http") + +Setting the same matching conditions again and again can be boring, so we have +a way to group several routes that share the same requirements. +We call it "subrouting". + +For example, let's say we have several URLs that should only match when the +host is "www.example.com". Create a route for that host and get a "subrouter" +from it: + + r := mux.NewRouter() + s := r.Host("www.example.com").Subrouter() + +Then register routes in the subrouter: + + s.HandleFunc("/products/", ProductsHandler) + s.HandleFunc("/products/{key}", ProductHandler) + s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) + +The three URL paths we registered above will only be tested if the domain is +"www.example.com", because the subrouter is tested first. This is not +only convenient, but also optimizes request matching. You can create +subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define +subrouters in a central place and then parts of the app can register its +paths relatively to a given subrouter. + +There's one more thing about subroutes. When a subrouter has a path prefix, +the inner routes use it as base for their paths: + + r := mux.NewRouter() + s := r.PathPrefix("/products").Subrouter() + // "/products/" + s.HandleFunc("/", ProductsHandler) + // "/products/{key}/" + s.HandleFunc("/{key}/", ProductHandler) + // "/products/{key}/details" + s.HandleFunc("/{key}/details", ProductDetailsHandler) + +Note that the path provided to PathPrefix() represents a "wildcard": calling +PathPrefix("/static/").Handler(...) means that the handler will be passed any +request that matches "/static/*". This makes it easy to serve static files with mux: + + func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. 
Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) + } + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, +or "reversed". We define a name calling Name() on a route. For example: + + r := mux.NewRouter() + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") + +To build a URL, get the route and call the URL() method, passing a sequence of +key/value pairs for the route variables. For the previous route, we would do: + + url, err := r.Get("article").URL("category", "technology", "id", "42") + +...and the result will be a url.URL with the following path: + + "/articles/technology/42" + +This also works for host and query value variables: + + r := mux.NewRouter() + r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + Queries("filter", "{filter}"). + HandlerFunc(ArticleHandler). + Name("article") + + // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42", + "filter", "gorilla") + +All variables defined in the route are required, and their values must +conform to the corresponding patterns. These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. 
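As a minimal, self-contained sketch of the middleware pattern described in the paragraph above (the port, route, and timing logic are illustrative and are not part of the vendored gorilla/mux documentation or of this change):

	package main

	import (
		"log"
		"net/http"
		"time"

		"github.com/gorilla/mux"
	)

	// timingMw logs how long each request took to serve before the
	// response is returned; it wraps whatever handler comes next.
	func timingMw(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			start := time.Now()
			next.ServeHTTP(w, r) // hand off to the next middleware or the final handler
			log.Printf("%s %s took %s", r.Method, r.RequestURI, time.Since(start))
		})
	}

	func main() {
		r := mux.NewRouter()
		r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok"))
		})
		r.Use(timingMw) // applied to every route matched by this router
		log.Fatal(http.ListenAndServe(":8000", r))
	}
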
+ + type MiddlewareFunc func(http.Handler) http.Handler + +Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). + +A very basic middleware which logs the URI of the request being handled could be written as: + + func simpleMw(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Do stuff here + log.Println(r.RequestURI) + // Call the next handler, which can be another middleware in the chain, or the final handler. + next.ServeHTTP(w, r) + }) + } + +Middlewares can be added to a router using `Router.Use()`: + + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.Use(simpleMw) + +A more complex authentication middleware, which maps session token to users, could be written as: + + // Define our struct + type authenticationMiddleware struct { + tokenUsers map[string]string + } + + // Initialize it somewhere + func (amw *authenticationMiddleware) Populate() { + amw.tokenUsers["00000000"] = "user0" + amw.tokenUsers["aaaaaaaa"] = "userA" + amw.tokenUsers["05f717e5"] = "randomUser" + amw.tokenUsers["deadbeef"] = "user0" + } + + // Middleware function, which will be called for each request + func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get("X-Session-Token") + + if user, found := amw.tokenUsers[token]; found { + // We found the token in our map + log.Printf("Authenticated user %s\n", user) + next.ServeHTTP(w, r) + } else { + http.Error(w, "Forbidden", http.StatusForbidden) + } + }) + } + + r := mux.NewRouter() + r.HandleFunc("/", handler) + + amw := authenticationMiddleware{} + amw.Populate() + + r.Use(amw.Middleware) + +Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. + +*/ +package mux diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go new file mode 100644 index 000000000..ceb812cee --- /dev/null +++ b/vendor/github.com/gorilla/mux/middleware.go @@ -0,0 +1,72 @@ +package mux + +import ( + "net/http" + "strings" +) + +// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. +// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed +// to it, and then calls the handler passed as parameter to the MiddlewareFunc. +type MiddlewareFunc func(http.Handler) http.Handler + +// middleware interface is anything which implements a MiddlewareFunc named Middleware. +type middleware interface { + Middleware(handler http.Handler) http.Handler +} + +// Middleware allows MiddlewareFunc to implement the middleware interface. +func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { + return mw(handler) +} + +// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. +func (r *Router) Use(mwf ...MiddlewareFunc) { + for _, fn := range mwf { + r.middlewares = append(r.middlewares, fn) + } +} + +// useInterface appends a middleware to the chain. 
Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. +func (r *Router) useInterface(mw middleware) { + r.middlewares = append(r.middlewares, mw) +} + +// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header +// on a request, by matching routes based only on paths. It also handles +// OPTIONS requests, by settings Access-Control-Allow-Methods, and then +// returning without calling the next http handler. +func CORSMethodMiddleware(r *Router) MiddlewareFunc { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var allMethods []string + + err := r.Walk(func(route *Route, _ *Router, _ []*Route) error { + for _, m := range route.matchers { + if _, ok := m.(*routeRegexp); ok { + if m.Match(req, &RouteMatch{}) { + methods, err := route.GetMethods() + if err != nil { + return err + } + + allMethods = append(allMethods, methods...) + } + break + } + } + return nil + }) + + if err == nil { + w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ",")) + + if req.Method == "OPTIONS" { + return + } + } + + next.ServeHTTP(w, req) + }) + } +} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go new file mode 100644 index 000000000..8aca972d2 --- /dev/null +++ b/vendor/github.com/gorilla/mux/mux.go @@ -0,0 +1,601 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "path" + "regexp" +) + +var ( + // ErrMethodMismatch is returned when the method in the request does not match + // the method defined against the route. + ErrMethodMismatch = errors.New("method is not allowed") + // ErrNotFound is returned when no route match is found. + ErrNotFound = errors.New("no matching route was found") +) + +// NewRouter returns a new router instance. +func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route)} +} + +// Router registers routes to be matched and dispatches a handler. +// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in a init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + NotFoundHandler http.Handler + + // Configurable Handler to be used when the request method does not match the route. + MethodNotAllowedHandler http.Handler + + // Routes to be matched, in order. + routes []*Route + + // Routes by name for URL building. + namedRoutes map[string]*Route + + // If true, do not clear the request context after handling the request. + // + // Deprecated: No effect when go1.7+ is used, since the context is stored + // on the request itself. 
+ KeepContext bool + + // Slice of middlewares to be called after a match is found + middlewares []middleware + + // configuration shared with `Route` + routeConf +} + +// common route configuration shared between `Router` and `Route` +type routeConf struct { + // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" + useEncodedPath bool + + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + + // Manager for the variables from host and path. + regexp routeRegexpGroup + + // List of matchers. + matchers []matcher + + // The scheme used when building URLs. + buildScheme string + + buildVarsFunc BuildVarsFunc +} + +// returns an effective deep copy of `routeConf` +func copyRouteConf(r routeConf) routeConf { + c := r + + if r.regexp.path != nil { + c.regexp.path = copyRouteRegexp(r.regexp.path) + } + + if r.regexp.host != nil { + c.regexp.host = copyRouteRegexp(r.regexp.host) + } + + c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries)) + for _, q := range r.regexp.queries { + c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) + } + + c.matchers = make([]matcher, 0, len(r.matchers)) + for _, m := range r.matchers { + c.matchers = append(c.matchers, m) + } + + return c +} + +func copyRouteRegexp(r *routeRegexp) *routeRegexp { + c := *r + return &c +} + +// Match attempts to match the given request against the router's registered routes. +// +// If the request matches a route of this router or one of its subrouters the Route, +// Handler, and Vars fields of the the match argument are filled and this function +// returns true. +// +// If the request does not match any of this router's or its subrouters' routes +// then this function returns false. If available, a reason for the match failure +// will be filled in the match argument's MatchErr field. If the match failure type +// (eg: not found) has a registered handler, the handler is assigned to the Handler +// field of the match argument. +func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + // Build middleware chain if no error was found + if match.MatchErr == nil { + for i := len(r.middlewares) - 1; i >= 0; i-- { + match.Handler = r.middlewares[i].Middleware(match.Handler) + } + } + return true + } + } + + if match.MatchErr == ErrMethodMismatch { + if r.MethodNotAllowedHandler != nil { + match.Handler = r.MethodNotAllowedHandler + return true + } + + return false + } + + // Closest match for a router (includes sub-routers) + if r.NotFoundHandler != nil { + match.Handler = r.NotFoundHandler + match.MatchErr = ErrNotFound + return true + } + + match.MatchErr = ErrNotFound + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if !r.skipClean { + path := req.URL.Path + if r.useEncodedPath { + path = req.URL.EscapedPath() + } + // Clean path to canonical form and redirect. + if p := cleanPath(path); p != path { + + // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. 
Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + req = setVars(req, match.Vars) + req = setCurrentRoute(req, match.Route) + } + + if handler == nil && match.MatchErr == ErrMethodMismatch { + handler = methodNotAllowedHandler() + } + + if handler == nil { + handler = http.NotFoundHandler() + } + + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.namedRoutes[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.namedRoutes[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will perform a redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for +// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed +// request will be made as a GET by most clients. Use middleware or client settings +// to modify this behaviour as needed. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. +func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// SkipClean defines the path cleaning behaviour for new routes. The initial +// value is false. Users should be careful about which routes are not cleaned +// +// When true, if the route path is "/path//to", it will remain with the double +// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ +// +// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will +// become /fetch/http/xkcd.com/534 +func (r *Router) SkipClean(value bool) *Router { + r.skipClean = value + return r +} + +// UseEncodedPath tells the router to match the encoded original path +// to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". +// +// If not called, the router will match the unencoded path to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" +func (r *Router) UseEncodedPath() *Router { + r.useEncodedPath = true + return r +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + // initialize a route with a copy of the parent router's configuration + route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} + r.routes = append(r.routes, route) + return route +} + +// Handle registers a new route with a matcher for the URL path. 
+// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) +} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// BuildVarsFunc registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. +var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. 
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + if err != nil { + return err + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. +type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string + + // MatchErr is set to appropriate matching error + // It is set to ErrMethodMismatch if there is a mismatch in + // the request method and route method + MatchErr error +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := contextGet(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +// This only works when called inside the handler of the matched route +// because the matched route is stored in the request context which is cleared +// after the handler returns, unless the KeepContext option is set on the +// Router. +func CurrentRoute(r *http.Request) *Route { + if rv := contextGet(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) *http.Request { + return contextSet(r, varsKey, val) +} + +func setCurrentRoute(r *http.Request, val interface{}) *http.Request { + return contextSet(r, routeKey, val) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +// checkPairs returns the count of strings passed in, and an error if +// the count is not an even number. +func checkPairs(pairs ...string) (int, error) { + length := len(pairs) + if length%2 != 0 { + return length, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + return length, nil +} + +// mapFromPairsToString converts variadic string parameters to a +// string to string map. 
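As a hedged illustration of the Walk API above (assuming a trivial `productHandler` and the log import), the following sketch prints the path template of every registered route; `GetPathTemplate` is defined later in this package:

    r := mux.NewRouter()
    r.HandleFunc("/products/{key}", productHandler)
    err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
        // Print each route's path template; skip routes that have no path.
        if tpl, tplErr := route.GetPathTemplate(); tplErr == nil {
            log.Println(tpl)
        }
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }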
+func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +// mapFromPairsToRegex converts variadic string parameters to a +// string to regex map. +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against +// the given regex +func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != nil { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v.MatchString(value) { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// methodNotAllowed replies to the request with an HTTP status code 405. +func methodNotAllowed(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusMethodNotAllowed) +} + +// methodNotAllowedHandler returns a simple request handler +// that replies to each request with a status code 405. +func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go new file mode 100644 index 000000000..f25288675 --- /dev/null +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -0,0 +1,328 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" +) + +type routeRegexpOptions struct { + strictSlash bool + useEncodedPath bool +} + +type regexpType int + +const ( + regexpTypePath regexpType = 0 + regexpTypeHost regexpType = 1 + regexpTypePrefix regexpType = 2 + regexpTypeQuery regexpType = 3 +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. +// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. + template := tpl + // Now let's parse it. + defaultPattern := "[^/]+" + if typ == regexpTypeQuery { + defaultPattern = ".*" + } else if typ == regexpTypeHost { + defaultPattern = "[^.]+" + } + // Only match strict slash if not matching + if typ != regexpTypePath { + options.strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if options.strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + pattern.WriteByte('^') + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) + + // Build the reverse template. + fmt.Fprintf(reverse, "%s%%s", raw) + + // Append variable name and compiled pattern. + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if options.strictSlash { + pattern.WriteString("[/]?") + } + if typ == regexpTypeQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } + if typ != regexpTypePrefix { + pattern.WriteByte('$') + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + + // Check for capturing groups which used to work in older versions + if reg.NumSubexp() != len(idxs)/2 { + panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + + "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") + } + + // Done! 
+ return &routeRegexp{ + template: template, + regexpType: typ, + options: options, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // The type of match + regexpType regexpType + // Options for matching + options routeRegexpOptions + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if r.regexpType != regexpTypeHost { + if r.regexpType == regexpTypeQuery { + return r.matchQueryString(req) + } + path := req.URL.Path + if r.options.useEncodedPath { + path = req.URL.EscapedPath() + } + return r.regexp.MatchString(path) + } + + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + if r.regexpType == regexpTypeQuery { + value = url.QueryEscape(value) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// getURLQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getURLQuery(req *http.Request) string { + if r.regexpType != regexpTypeQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + for key, vals := range req.URL.Query() { + if key == templateKey && len(vals) > 0 { + return key + "=" + vals[0] + } + } + return "" +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getURLQuery(req)) +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. +func braceIndices(s string) ([]int, error) { + var level, idx int + var idxs []int + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// varGroupName builds a capturing group name for the indexed variable. 
+func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + host := getHost(req) + matches := v.host.regexp.FindStringSubmatchIndex(host) + if len(matches) > 0 { + extractVars(host, matches, v.host.varsN, m.Vars) + } + } + path := req.URL.Path + if r.useEncodedPath { + path = req.URL.EscapedPath() + } + // Store path variables. + if v.path != nil { + matches := v.path.regexp.FindStringSubmatchIndex(path) + if len(matches) > 0 { + extractVars(path, matches, v.path.varsN, m.Vars) + // Check if we should redirect. + if v.path.options.strictSlash { + p1 := strings.HasSuffix(path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) + } + } + } + } + // Store query string variables. + for _, q := range v.queries { + queryURL := q.getURLQuery(req) + matches := q.regexp.FindStringSubmatchIndex(queryURL) + if len(matches) > 0 { + extractVars(queryURL, matches, q.varsN, m.Vars) + } + } +} + +// getHost tries its best to return the request host. +// According to section 14.23 of RFC 2616 the Host header +// can include the port number if the default value of 80 is not used. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + return r.Host +} + +func extractVars(input string, matches []int, names []string, output map[string]string) { + for i, name := range names { + output[name] = input[matches[2*i+2]:matches[2*i+3]] + } +} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go new file mode 100644 index 000000000..16a7cdf4e --- /dev/null +++ b/vendor/github.com/gorilla/mux/route.go @@ -0,0 +1,710 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Request handler for the route. + handler http.Handler + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error + + // "global" reference to all named routes + namedRoutes map[string]*Route + + // config possibly passed in from `Router` + routeConf +} + +// SkipClean reports whether path cleaning is enabled for this route via +// Router.SkipClean. +func (r *Route) SkipClean() bool { + return r.skipClean +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + + var matchErr error + + // Match everything. 
+ for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + if _, ok := m.(methodMatcher); ok { + matchErr = ErrMethodMismatch + continue + } + + // Ignore ErrNotFound errors. These errors arise from match call + // to Subrouters. + // + // This prevents subsequent matching subrouters from failing to + // run middleware. If not ignored, the middleware would see a + // non-nil MatchErr and be skipped, even when there was a + // matching route. + if match.MatchErr == ErrNotFound { + match.MatchErr = nil + } + + matchErr = nil + return false + } + } + + if matchErr != nil { + match.MatchErr = matchErr + return false + } + + if match.MatchErr == ErrMethodMismatch { + // We found a route which matches request method, clear MatchErr + match.MatchErr = nil + // Then override the mis-matched handler + match.Handler = r.handler + } + + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + + // Set variables. + r.regexp.setMatch(req, match, r) + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// It is an error to call Name more than once on a route. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.namedRoutes[name] = r + } + return r +} + +// GetName returns the name for the route, if any. +func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. 
+func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { + if r.err != nil { + return r.err + } + if typ == regexpTypePath || typ == regexpTypePrefix { + if len(tpl) > 0 && tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ + strictSlash: r.strictSlash, + useEncodedPath: r.useEncodedPath, + }) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if typ == regexpTypeHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if typ == regexpTypeQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. +type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithString(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. +// If the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairsToString(pairs...) + return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// headerRegexMatcher matches the request against the route given a regex for the header +type headerRegexMatcher map[string]*regexp.Regexp + +func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithRegex(m, r.Header, true) +} + +// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex +// support. For example: +// +// r := mux.NewRouter() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both the request header matches both regular expressions. +// If the value is an empty string, it will match any value if the key is set. +// Use the start and end of string anchors (^ and $) to match an exact value. +func (r *Route) HeadersRegexp(pairs ...string) *Route { + if r.err == nil { + var headers map[string]*regexp.Regexp + headers, r.err = mapFromPairsToRegex(pairs...) + return r.addMatcher(headerRegexMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. 
+// +// For example: +// +// r := mux.NewRouter() +// r.Host("www.example.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypeHost) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +// Match returns the match for a given request. +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. +// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". +func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypePath) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. +func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. +// For example: +// +// r := mux.NewRouter() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined queries +// values, e.g.: ?foo=bar&id=42. +// +// It the value is an empty string, it will match any value if the key is set. 
+// +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.URL.Scheme) +} + +// Schemes adds a matcher for URL schemes. +// It accepts a sequence of schemes to be matched, e.g.: "http", "https". +func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + if len(schemes) > 0 { + r.buildScheme = schemes[0] + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). +type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. +func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + if r.buildVarsFunc != nil { + // compose the old and new functions + old := r.buildVarsFunc + r.buildVarsFunc = func(m map[string]string) map[string]string { + return f(old(m)) + } + } else { + r.buildVarsFunc = f + } + return r +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched. For example: +// +// r := mux.NewRouter() +// s := r.Host("www.example.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// +// Here, the routes registered in the subrouter won't be tested if the host +// doesn't match. +func (r *Route) Subrouter() *Router { + // initialize a subrouter with a copy of the parent route's configuration + router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} + r.addMatcher(router) + return router +} + +// ---------------------------------------------------------------------------- +// URL building +// ---------------------------------------------------------------------------- + +// URL builds a URL for the route. +// +// It accepts a sequence of key/value pairs for the route variables. For +// example, given this route: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// ...a URL for it can be built using: +// +// url, err := r.Get("article").URL("category", "technology", "id", "42") +// +// ...which will return an url.URL with the following path: +// +// "/articles/technology/42" +// +// This also works for host variables: +// +// r := mux.NewRouter() +// r.Host("{subdomain}.domain.com"). +// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). 
+// Name("article") +// +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") +// +// All variables defined in the route are required, and their values must +// conform to the corresponding patterns. +func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + var scheme, host, path string + queries := make([]string, 0, len(r.regexp.queries)) + if r.regexp.host != nil { + if host, err = r.regexp.host.url(values); err != nil { + return nil, err + } + scheme = "http" + if r.buildScheme != "" { + scheme = r.buildScheme + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(values); err != nil { + return nil, err + } + } + for _, q := range r.regexp.queries { + var query string + if query, err = q.url(values); err != nil { + return nil, err + } + queries = append(queries, query) + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + RawQuery: strings.Join(queries, "&"), + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + u := &url.URL{ + Scheme: "http", + Host: host, + } + if r.buildScheme != "" { + u.Scheme = r.buildScheme + } + return u, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// GetPathTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.path == nil { + return "", errors.New("mux: route doesn't have a path") + } + return r.regexp.path.template, nil +} + +// GetPathRegexp returns the expanded regular expression used to match route path. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathRegexp() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.path == nil { + return "", errors.New("mux: route does not have a path") + } + return r.regexp.path.regexp.String(), nil +} + +// GetQueriesRegexp returns the expanded regular expressions used to match the +// route queries. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. 
+// An error will be returned if the route does not have queries. +func (r *Route) GetQueriesRegexp() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") + } + var queries []string + for _, query := range r.regexp.queries { + queries = append(queries, query.regexp.String()) + } + return queries, nil +} + +// GetQueriesTemplates returns the templates used to build the +// query matching. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define queries. +func (r *Route) GetQueriesTemplates() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") + } + var queries []string + for _, query := range r.regexp.queries { + queries = append(queries, query.template) + } + return queries, nil +} + +// GetMethods returns the methods the route matches against +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if route does not have methods. +func (r *Route) GetMethods() ([]string, error) { + if r.err != nil { + return nil, r.err + } + for _, m := range r.matchers { + if methods, ok := m.(methodMatcher); ok { + return []string(methods), nil + } + } + return nil, errors.New("mux: route doesn't have methods") +} + +// GetHostTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a host. +func (r *Route) GetHostTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.host == nil { + return "", errors.New("mux: route doesn't have a host") + } + return r.regexp.host.template, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) + if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go new file mode 100644 index 000000000..32ecffde4 --- /dev/null +++ b/vendor/github.com/gorilla/mux/test_helpers.go @@ -0,0 +1,19 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import "net/http" + +// SetURLVars sets the URL variables for the given request, to be accessed via +// mux.Vars for testing route behaviour. Arguments are not modified, a shallow +// copy is returned. +// +// This API should only be used for testing purposes; it provides a way to +// inject variables into the request context. Alternatively, URL variables +// can be set by making a route that captures the required variables, +// starting a server and sending the request to that server. 
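A hedged testing sketch for SetURLVars (assuming a handler named `ArticleHandler`, as in the route examples above, plus the net/http/httptest import): inject route variables directly rather than starting a server.

    req := httptest.NewRequest(http.MethodGet, "/articles/42", nil)
    req = mux.SetURLVars(req, map[string]string{"id": "42"})
    rr := httptest.NewRecorder()
    ArticleHandler(rr, req) // the handler can now read mux.Vars(req)["id"]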
+func SetURLVars(r *http.Request, val map[string]string) *http.Request { + return setVars(r, val) +} diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore deleted file mode 100644 index cd3fcd1ef..000000000 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -.idea/ -*.iml diff --git a/vendor/github.com/gorilla/websocket/.travis.yml b/vendor/github.com/gorilla/websocket/.travis.yml deleted file mode 100644 index a49db51c4..000000000 --- a/vendor/github.com/gorilla/websocket/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go -sudo: false - -matrix: - include: - - go: 1.7.x - - go: 1.8.x - - go: 1.9.x - - go: 1.10.x - - go: 1.11.x - - go: tip - allow_failures: - - go: tip - -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go vet $(go list ./... | grep -v /vendor/) - - go test -v -race ./... diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md deleted file mode 100644 index 20e391f86..000000000 --- a/vendor/github.com/gorilla/websocket/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# Gorilla WebSocket - -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the -[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. - -[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) -[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) - -### Documentation - -* [API Reference](http://godoc.org/github.com/gorilla/websocket) -* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) -* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) -* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) -* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) - -### Status - -The Gorilla WebSocket package provides a complete and tested implementation of -the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The -package API is stable. - -### Installation - - go get github.com/gorilla/websocket - -### Protocol Compliance - -The Gorilla WebSocket package passes the server tests in the [Autobahn Test -Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn -subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). - -### Gorilla WebSocket compared with other packages - - - - - - - - - - - - - - - - - - -
-|                                          | github.com/gorilla | golang.org/x/net |
-| ---------------------------------------- | ------------------ | ---------------- |
-| **RFC 6455 Features**                    |                    |                  |
-| Passes Autobahn Test Suite               | Yes                | No               |
-| Receive fragmented message               | Yes                | No, see note 1   |
-| Send close message                       | Yes                | No               |
-| Send pings and receive pongs             | Yes                | No               |
-| Get the type of a received data message  | Yes                | Yes, see note 2  |
-| **Other Features**                       |                    |                  |
-| Compression Extensions                   | Experimental       | No               |
-| Read message using io.Reader             | Yes                | No, see note 3   |
-| Write message using io.WriteCloser       | Yes                | No, see note 3   |
- -Notes: - -1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). -2. The application can get the type of a received data message by implementing - a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) - function. -3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. - Read returns when the input buffer is full or a frame boundary is - encountered. Each call to Write sends a single frame message. The Gorilla - io.Reader and io.WriteCloser operate on a single WebSocket message. - diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml deleted file mode 100644 index 2bca4c599..000000000 --- a/vendor/github.com/gregjones/httpcache/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -sudo: false -language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md deleted file mode 100644 index 61bd830e5..000000000 --- a/vendor/github.com/gregjones/httpcache/README.md +++ /dev/null @@ -1,24 +0,0 @@ -httpcache -========= - -[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) - -Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses. - -It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). - -Cache Backends --------------- - -- The built-in 'memory' cache stores responses in an in-memory map. -- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. -- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. -- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. -- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). -- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. -- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. 
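The httpcache package itself remains vendored; only its CI config and README are removed in this diff. A minimal, hedged sketch of the typical use the README describes, wrapping an http.Client with the built-in memory cache (assumes the log import and a reachable example URL):

    tp := httpcache.NewMemoryCacheTransport()
    client := tp.Client() // *http.Client whose RoundTripper caches responses
    resp, err := client.Get("https://example.com/resource")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()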
- -License -------- - -- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go index 8239edc2c..b41a63d1f 100644 --- a/vendor/github.com/gregjones/httpcache/httpcache.go +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -10,7 +10,6 @@ import ( "bufio" "bytes" "errors" - "fmt" "io" "io/ioutil" "net/http" @@ -41,7 +40,11 @@ type Cache interface { // cacheKey returns the cache key for req. func cacheKey(req *http.Request) string { - return req.URL.String() + if req.Method == http.MethodGet { + return req.URL.String() + } else { + return req.Method + " " + req.URL.String() + } } // CachedResponse returns the cached http.Response for req if present, and nil @@ -189,16 +192,11 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error for _, header := range endToEndHeaders { cachedResp.Header[header] = resp.Header[header] } - cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) - cachedResp.StatusCode = http.StatusOK - resp = cachedResp } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { // In case of transport failure and stale-if-error activated, returns cached content // when available - cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK)) - cachedResp.StatusCode = http.StatusOK return cachedResp, nil } else { if err != nil || resp.StatusCode != http.StatusOK { @@ -418,14 +416,14 @@ func canStaleOnError(respHeaders, reqHeaders http.Header) bool { func getEndToEndHeaders(respHeaders http.Header) []string { // These headers are always hop-by-hop hopByHopHeaders := map[string]struct{}{ - "Connection": struct{}{}, - "Keep-Alive": struct{}{}, - "Proxy-Authenticate": struct{}{}, - "Proxy-Authorization": struct{}{}, - "Te": struct{}{}, - "Trailers": struct{}{}, - "Transfer-Encoding": struct{}{}, - "Upgrade": struct{}{}, + "Connection": {}, + "Keep-Alive": {}, + "Proxy-Authenticate": {}, + "Proxy-Authorization": {}, + "Te": {}, + "Trailers": {}, + "Transfer-Encoding": {}, + "Upgrade": {}, } for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { @@ -435,7 +433,7 @@ func getEndToEndHeaders(respHeaders http.Header) []string { } } endToEndHeaders := []string{} - for respHeader, _ := range respHeaders { + for respHeader := range respHeaders { if _, ok := hopByHopHeaders[respHeader]; !ok { endToEndHeaders = append(endToEndHeaders, respHeader) } diff --git a/vendor/github.com/hashicorp/go-syslog/.gitignore b/vendor/github.com/hashicorp/go-syslog/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/hashicorp/go-syslog/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/hashicorp/go-syslog/README.md b/vendor/github.com/hashicorp/go-syslog/README.md deleted file mode 100644 index bbfae8f9b..000000000 --- a/vendor/github.com/hashicorp/go-syslog/README.md +++ /dev/null @@ -1,11 +0,0 @@ -go-syslog -========= - -This repository provides a very simple `gsyslog` package. The point of this -package is to allow safe importing of syslog without introducing cross-compilation -issues. 
The stdlib `log/syslog` cannot be imported on Windows systems, and without -conditional compilation this adds complications. - -Instead, `gsyslog` provides a very simple wrapper around `log/syslog` but returns -a runtime error if attempting to initialize on a non Linux or OSX system. - diff --git a/vendor/github.com/hashicorp/go-syslog/go.mod b/vendor/github.com/hashicorp/go-syslog/go.mod deleted file mode 100644 index 0e4c2d0dc..000000000 --- a/vendor/github.com/hashicorp/go-syslog/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/hashicorp/go-syslog diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 000000000..d05575961 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,204 @@ +package version + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + check *Version + original string +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. 
+type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintFunc + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintFunc{ + "": constraintEqual, + "=": constraintEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "~>": constraintPessimistic, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + return &Constraint{ + f: constraintOperators[matches[1]], + check: check, + original: v, + }, nil +} + +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. 
+ return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 000000000..186fd7cc1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,370 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) + +// The raw regular expression string used for testing the validity +// of a version. +const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) + +// Version represents a single version. 
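
The constraint machinery above is exercised roughly as follows; a minimal sketch assuming the usual `github.com/hashicorp/go-version` import path and the `NewVersion`/`NewConstraint` entry points defined in this vendored file (the version and constraint strings are examples only):

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	v, err := version.NewVersion("1.2.3")
	if err != nil {
		panic(err)
	}

	// NewConstraint parses a comma-separated list; Check is true only if
	// every individual constraint accepts the version.
	constraints, err := version.NewConstraint(">= 1.0, < 2.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(constraints.Check(v)) // true
}
```
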
+type Version struct { + metadata string + pre string + segments []int64 + si int + original string +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + si := 0 + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = int64(val) + si++ + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + pre := matches[7] + if pre == "" { + pre = matches[4] + } + + return &Version{ + metadata: matches[10], + pre: pre, + segments: segments, + si: si, + original: v, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// or GreaterThan methods. 
+func (v *Version) Compare(other *Version) int { + // A quick, efficient equality check + if v.String() == other.String() { + return 0 + } + + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() + + // If the segments are the same, we must compare on prerelease info + if reflect.DeepEqual(segmentsSelf, segmentsOther) { + preSelf := v.Prerelease() + preOther := other.Prerelease() + if preSelf == "" && preOther == "" { + return 0 + } + if preSelf == "" { + return 1 + } + if preOther == "" { + return -1 + } + + return comparePrereleases(preSelf, preOther) + } + + // Get the highest specificity (hS), or if they're equal, just use segmentSelf length + lenSelf := len(segmentsSelf) + lenOther := len(segmentsOther) + hS := lenSelf + if lenSelf < lenOther { + hS = lenOther + } + // Compare the segments + // Because a constraint could have more/less specificity than the version it's + // checking, we need to account for a lopsided or jagged comparison + for i := 0; i < hS; i++ { + if i > lenSelf-1 { + // This means Self had the lower specificity + // Check to see if the remaining segments in Other are all zeros + if !allZero(segmentsOther[i:]) { + // if not, it means that Other has to be greater than Self + return -1 + } + break + } else if i > lenOther-1 { + // this means Other had the lower specificity + // Check to see if the remaining segments in Self are all zeros - + if !allZero(segmentsSelf[i:]) { + //if not, it means that Self has to be greater than Other + return 1 + } + break + } + lhs := segmentsSelf[i] + rhs := segmentsOther[i] + if lhs == rhs { + continue + } else if lhs < rhs { + return -1 + } + // Otherwis, rhs was > lhs, they're not equal + return 1 + } + + // if we got this far, they're equal + return 0 +} + +func allZero(segs []int64) bool { + for _, s := range segs { + if s != 0 { + return false + } + } + return true +} + +func comparePart(preSelf string, preOther string) int { + if preSelf == preOther { + return 0 + } + + var selfInt int64 + selfNumeric := true + selfInt, err := strconv.ParseInt(preSelf, 10, 64) + if err != nil { + selfNumeric = false + } + + var otherInt int64 + otherNumeric := true + otherInt, err = strconv.ParseInt(preOther, 10, 64) + if err != nil { + otherNumeric = false + } + + // if a part is empty, we use the other to decide + if preSelf == "" { + if otherNumeric { + return -1 + } + return 1 + } + + if preOther == "" { + if selfNumeric { + return 1 + } + return -1 + } + + if selfNumeric && !otherNumeric { + return -1 + } else if !selfNumeric && otherNumeric { + return 1 + } else if !selfNumeric && !otherNumeric && preSelf > preOther { + return 1 + } else if selfInt > otherInt { + return 1 + } + + return -1 +} + +func comparePrereleases(v string, other string) int { + // the same pre release! 
+ if v == other { + return 0 + } + + // split both pre releases for analyse their parts + selfPreReleaseMeta := strings.Split(v, ".") + otherPreReleaseMeta := strings.Split(other, ".") + + selfPreReleaseLen := len(selfPreReleaseMeta) + otherPreReleaseLen := len(otherPreReleaseMeta) + + biggestLen := otherPreReleaseLen + if selfPreReleaseLen > otherPreReleaseLen { + biggestLen = selfPreReleaseLen + } + + // loop for parts to find the first difference + for i := 0; i < biggestLen; i = i + 1 { + partSelfPre := "" + if i < selfPreReleaseLen { + partSelfPre = selfPreReleaseMeta[i] + } + + partOtherPre := "" + if i < otherPreReleaseLen { + partOtherPre = otherPreReleaseMeta[i] + } + + compare := comparePart(partSelfPre, partOtherPre) + // if parts are equals, continue the loop + if compare != 0 { + return compare + } + } + + return 0 +} + +// Equal tests if two versions are equal. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// GreaterThan tests if this version is greater than another version. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// LessThan tests if this version is less than another version. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// Metadata returns any metadata that was part of the version +// string. +// +// Metadata is anything that comes after the "+" in the version. +// For example, with "1.2.3+beta", the metadata is "beta". +func (v *Version) Metadata() string { + return v.metadata +} + +// Prerelease returns any prerelease data that is part of the version, +// or blank if there is no prerelease data. +// +// Prerelease information is anything that comes after the "-" in the +// version (but before any metadata). For example, with "1.2.3-beta", +// the prerelease information is "beta". +func (v *Version) Prerelease() string { + return v.pre +} + +// Segments returns the numeric segments of the version as a slice of ints. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments() []int { + segmentSlice := make([]int, len(v.segments)) + for i, v := range v.segments { + segmentSlice[i] = int(v) + } + return segmentSlice +} + +// Segments64 returns the numeric segments of the version as a slice of int64s. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments64() []int64 { + result := make([]int64, len(v.segments)) + copy(result, v.segments) + return result +} + +// String returns the full version string included pre-release +// and metadata information. +// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. 
+func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 000000000..cc888d43e --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,17 @@ +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/hashicorp/golang-lru/.gitignore b/vendor/github.com/hashicorp/golang-lru/.gitignore deleted file mode 100644 index 836562412..000000000 --- a/vendor/github.com/hashicorp/golang-lru/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go index 337d96329..e474cd075 100644 --- a/vendor/github.com/hashicorp/golang-lru/2q.go +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -30,9 +30,9 @@ type TwoQueueCache struct { size int recentSize int - recent *simplelru.LRU - frequent *simplelru.LRU - recentEvict *simplelru.LRU + recent simplelru.LRUCache + frequent simplelru.LRUCache + recentEvict simplelru.LRUCache lock sync.RWMutex } @@ -84,7 +84,8 @@ func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCa return c, nil } -func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) { +// Get looks up a key's value from the cache. +func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() @@ -105,6 +106,7 @@ func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) { return nil, false } +// Add adds a value to the cache. func (c *TwoQueueCache) Add(key, value interface{}) { c.lock.Lock() defer c.lock.Unlock() @@ -160,12 +162,15 @@ func (c *TwoQueueCache) ensureSpace(recentEvict bool) { c.frequent.RemoveOldest() } +// Len returns the number of items in the cache. func (c *TwoQueueCache) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. func (c *TwoQueueCache) Keys() []interface{} { c.lock.RLock() defer c.lock.RUnlock() @@ -174,6 +179,7 @@ func (c *TwoQueueCache) Keys() []interface{} { return append(k1, k2...) } +// Remove removes the provided key from the cache. 
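
Combined with the `Collection` sort helper added just above, the comparison methods allow semantic ordering of version strings; a small illustrative sketch (the input strings are examples only):

```go
package main

import (
	"fmt"
	"sort"

	version "github.com/hashicorp/go-version"
)

func main() {
	raw := []string{"1.1", "1.0.0-rc1", "v1.0.0"}
	versions := make([]*version.Version, len(raw))
	for i, s := range raw {
		versions[i] = version.Must(version.NewVersion(s))
	}

	// Collection implements sort.Interface via Version.LessThan, so
	// sort.Sort orders the slice ascending; a prerelease sorts before
	// the corresponding release.
	sort.Sort(version.Collection(versions))
	for _, v := range versions {
		fmt.Println(v) // canonical forms: 1.0.0-rc1, 1.0.0, 1.1.0
	}
}
```

Note that printing relies on the canonicalizing `String` method described above, so "1.1" and "v1.0.0" come out as "1.1.0" and "1.0.0".
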
func (c *TwoQueueCache) Remove(key interface{}) { c.lock.Lock() defer c.lock.Unlock() @@ -188,6 +194,7 @@ func (c *TwoQueueCache) Remove(key interface{}) { } } +// Purge is used to completely clear the cache. func (c *TwoQueueCache) Purge() { c.lock.Lock() defer c.lock.Unlock() @@ -196,13 +203,17 @@ func (c *TwoQueueCache) Purge() { c.recentEvict.Purge() } +// Contains is used to check if the cache contains a key +// without updating recency or frequency. func (c *TwoQueueCache) Contains(key interface{}) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) } -func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) { +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md deleted file mode 100644 index 33e58cfaf..000000000 --- a/vendor/github.com/hashicorp/golang-lru/README.md +++ /dev/null @@ -1,25 +0,0 @@ -golang-lru -========== - -This provides the `lru` package which implements a fixed-size -thread safe LRU cache. It is based on the cache in Groupcache. - -Documentation -============= - -Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru) - -Example -======= - -Using the LRU is very simple: - -```go -l, _ := New(128) -for i := 0; i < 256; i++ { - l.Add(i, nil) -} -if l.Len() != 128 { - panic(fmt.Sprintf("bad len: %v", l.Len())) -} -``` diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go index a2a252817..555225a21 100644 --- a/vendor/github.com/hashicorp/golang-lru/arc.go +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -18,11 +18,11 @@ type ARCCache struct { size int // Size is the total capacity of the cache p int // P is the dynamic preference towards T1 or T2 - t1 *simplelru.LRU // T1 is the LRU for recently accessed items - b1 *simplelru.LRU // B1 is the LRU for evictions from t1 + t1 simplelru.LRUCache // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 - t2 *simplelru.LRU // T2 is the LRU for frequently accessed items - b2 *simplelru.LRU // B2 is the LRU for evictions from t2 + t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 lock sync.RWMutex } @@ -60,11 +60,11 @@ func NewARC(size int) (*ARCCache, error) { } // Get looks up a key's value from the cache. -func (c *ARCCache) Get(key interface{}) (interface{}, bool) { +func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() defer c.lock.Unlock() - // Ff the value is contained in T1 (recent), then + // If the value is contained in T1 (recent), then // promote it to T2 (frequent) if val, ok := c.t1.Peek(key); ok { c.t1.Remove(key) @@ -153,7 +153,7 @@ func (c *ARCCache) Add(key, value interface{}) { // Remove from B2 c.b2.Remove(key) - // Add the key to the frequntly used list + // Add the key to the frequently used list c.t2.Add(key, value) return } @@ -247,7 +247,7 @@ func (c *ARCCache) Contains(key interface{}) bool { // Peek is used to inspect the cache value of a key // without updating recency or frequency. 
-func (c *ARCCache) Peek(key interface{}) (interface{}, bool) { +func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.t1.Peek(key); ok { diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 000000000..2547df979 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go index a6285f989..1cbe04b7d 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -1,6 +1,3 @@ -// This package provides a simple LRU cache. It is based on the -// LRU implementation in groupcache: -// https://github.com/golang/groupcache/tree/master/lru package lru import ( @@ -11,11 +8,11 @@ import ( // Cache is a thread-safe fixed size LRU cache. type Cache struct { - lru *simplelru.LRU + lru simplelru.LRUCache lock sync.RWMutex } -// New creates an LRU of the given size +// New creates an LRU of the given size. func New(size int) (*Cache, error) { return NewWithEvict(size, nil) } @@ -33,7 +30,7 @@ func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) return c, nil } -// Purge is used to completely clear the cache +// Purge is used to completely clear the cache. func (c *Cache) Purge() { c.lock.Lock() c.lru.Purge() @@ -41,48 +38,51 @@ func (c *Cache) Purge() { } // Add adds a value to the cache. Returns true if an eviction occurred. -func (c *Cache) Add(key, value interface{}) bool { +func (c *Cache) Add(key, value interface{}) (evicted bool) { c.lock.Lock() - defer c.lock.Unlock() - return c.lru.Add(key, value) + evicted = c.lru.Add(key, value) + c.lock.Unlock() + return evicted } // Get looks up a key's value from the cache. -func (c *Cache) Get(key interface{}) (interface{}, bool) { +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { c.lock.Lock() - defer c.lock.Unlock() - return c.lru.Get(key) + value, ok = c.lru.Get(key) + c.lock.Unlock() + return value, ok } -// Check if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. 
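
The package doc added above distinguishes the three cache flavours; here is a minimal sketch of the two-queue variant, assuming the conventional `lru` package alias (capacity, keys, and values are illustrative):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// New2Q splits the capacity between a recently-used and a frequently-used
	// queue, so a burst of one-off lookups cannot flush the hot entries.
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}

	for i := 0; i < 256; i++ {
		cache.Add(i, i*i)
	}
	if v, ok := cache.Get(200); ok {
		fmt.Println("got", v)
	}
	fmt.Println("len", cache.Len())
}
```

Per the package doc, the two-queue and ARC caches trade roughly 2x computational overhead (plus extra bookkeeping) for better resistance to scan-style access patterns.
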
func (c *Cache) Contains(key interface{}) bool { c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Contains(key) + containKey := c.lru.Contains(key) + c.lock.RUnlock() + return containKey } -// Returns the key value (or undefined if not found) without updating +// Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. -func (c *Cache) Peek(key interface{}) (interface{}, bool) { +func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Peek(key) + value, ok = c.lru.Peek(key) + c.lock.RUnlock() + return value, ok } // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. -func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) { +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { c.lock.Lock() defer c.lock.Unlock() if c.lru.Contains(key) { return true, false - } else { - evict := c.lru.Add(key, value) - return false, evict } + evicted = c.lru.Add(key, value) + return false, evicted } // Remove removes the provided key from the cache. @@ -102,13 +102,15 @@ func (c *Cache) RemoveOldest() { // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *Cache) Keys() []interface{} { c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Keys() + keys := c.lru.Keys() + c.lock.RUnlock() + return keys } // Len returns the number of items in the cache. func (c *Cache) Len() int { c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Len() + length := c.lru.Len() + c.lock.RUnlock() + return length } diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go index 68d097a1c..5673773b2 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -36,7 +36,7 @@ func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { return c, nil } -// Purge is used to completely clear the cache +// Purge is used to completely clear the cache. func (c *LRU) Purge() { for k, v := range c.items { if c.onEvict != nil { @@ -47,8 +47,8 @@ func (c *LRU) Purge() { c.evictList.Init() } -// Add adds a value to the cache. Returns true if an eviction occured. -func (c *LRU) Add(key, value interface{}) bool { +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) @@ -78,17 +78,18 @@ func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { return } -// Check if a key is in the cache, without updating the recent-ness +// Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. func (c *LRU) Contains(key interface{}) (ok bool) { _, ok = c.items[key] return ok } -// Returns the key value (or undefined if not found) without updating +// Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. 
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { + var ent *list.Element + if ent, ok = c.items[key]; ok { return ent.Value.(*entry).value, true } return nil, ok @@ -96,7 +97,7 @@ func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { // Remove removes the provided key from the cache, returning if the // key was contained. -func (c *LRU) Remove(key interface{}) bool { +func (c *LRU) Remove(key interface{}) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true @@ -105,7 +106,7 @@ func (c *LRU) Remove(key interface{}) bool { } // RemoveOldest removes the oldest item from the cache. -func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) { +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) @@ -116,7 +117,7 @@ func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) { } // GetOldest returns the oldest entry -func (c *LRU) GetOldest() (interface{}, interface{}, bool) { +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 000000000..74c707744 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,36 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Check if a key exsists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. 
+ Len() int + + // Clear all cache entries + Purge() +} diff --git a/vendor/github.com/hpcloud/tail/LICENSE.txt b/vendor/github.com/hpcloud/tail/LICENSE.txt new file mode 100644 index 000000000..818d802a5 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/LICENSE.txt @@ -0,0 +1,21 @@ +# The MIT License (MIT) + +# © Copyright 2015 Hewlett Packard Enterprise Development LP +Copyright (c) 2014 ActiveState + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/Licence b/vendor/github.com/hpcloud/tail/ratelimiter/Licence new file mode 100644 index 000000000..434aab19f --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/Licence @@ -0,0 +1,7 @@ +Copyright (C) 2013 99designs + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go new file mode 100644 index 000000000..358b69e7f --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go @@ -0,0 +1,97 @@ +// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends. 
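
Stepping back to the new `simplelru.LRUCache` interface above, a short sketch of holding the concrete `*LRU` behind the interface; the keys, values, and eviction callback are illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// onEvict matches simplelru.EvictCallback and runs for each eviction.
	onEvict := func(key interface{}, value interface{}) {
		fmt.Println("evicted", key, value)
	}

	l, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err)
	}

	// *LRU satisfies the LRUCache interface, so callers can hold the
	// interface type and swap in other implementations later.
	var cache simplelru.LRUCache = l
	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts "a"
	fmt.Println(cache.Contains("a"), cache.Len())
}
```
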
+package ratelimiter + +import ( + "time" +) + +type LeakyBucket struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time + Now func() time.Time +} + +func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket { + bucket := LeakyBucket{ + Size: size, + Fill: 0, + LeakInterval: leakInterval, + Now: time.Now, + Lastupdate: time.Now(), + } + + return &bucket +} + +func (b *LeakyBucket) updateFill() { + now := b.Now() + if b.Fill > 0 { + elapsed := now.Sub(b.Lastupdate) + + b.Fill -= float64(elapsed) / float64(b.LeakInterval) + if b.Fill < 0 { + b.Fill = 0 + } + } + b.Lastupdate = now +} + +func (b *LeakyBucket) Pour(amount uint16) bool { + b.updateFill() + + var newfill float64 = b.Fill + float64(amount) + + if newfill > float64(b.Size) { + return false + } + + b.Fill = newfill + + return true +} + +// The time at which this bucket will be completely drained +func (b *LeakyBucket) DrainedAt() time.Time { + return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval))) +} + +// The duration until this bucket is completely drained +func (b *LeakyBucket) TimeToDrain() time.Duration { + return b.DrainedAt().Sub(b.Now()) +} + +func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration { + return b.Now().Sub(b.Lastupdate) +} + +type LeakyBucketSer struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time +} + +func (b *LeakyBucket) Serialise() *LeakyBucketSer { + bucket := LeakyBucketSer{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + } + + return &bucket +} + +func (b *LeakyBucketSer) DeSerialise() *LeakyBucket { + bucket := LeakyBucket{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + Now: time.Now, + } + + return &bucket +} diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go new file mode 100644 index 000000000..8f6a5784a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go @@ -0,0 +1,58 @@ +package ratelimiter + +import ( + "errors" + "time" +) + +const GC_SIZE int = 100 + +type Memory struct { + store map[string]LeakyBucket + lastGCCollected time.Time +} + +func NewMemory() *Memory { + m := new(Memory) + m.store = make(map[string]LeakyBucket) + m.lastGCCollected = time.Now() + return m +} + +func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) { + + bucket, ok := m.store[key] + if !ok { + return nil, errors.New("miss") + } + + return &bucket, nil +} + +func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error { + + if len(m.store) > GC_SIZE { + m.GarbageCollect() + } + + m.store[key] = bucket + + return nil +} + +func (m *Memory) GarbageCollect() { + now := time.Now() + + // rate limit GC to once per minute + if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() { + + for key, bucket := range m.store { + // if the bucket is drained, then GC + if bucket.DrainedAt().Unix() > now.Unix() { + delete(m.store, key) + } + } + + m.lastGCCollected = now + } +} diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/storage.go b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go new file mode 100644 index 000000000..89b2fe882 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go @@ -0,0 +1,6 @@ +package ratelimiter + +type Storage interface { + GetBucketFor(string) (*LeakyBucket, error) + 
SetBucketFor(string, LeakyBucket) error +} diff --git a/vendor/github.com/hpcloud/tail/tail.go b/vendor/github.com/hpcloud/tail/tail.go new file mode 100644 index 000000000..2d252d605 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/tail.go @@ -0,0 +1,438 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package tail + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strings" + "sync" + "time" + + "github.com/hpcloud/tail/ratelimiter" + "github.com/hpcloud/tail/util" + "github.com/hpcloud/tail/watch" + "gopkg.in/tomb.v1" +) + +var ( + ErrStop = fmt.Errorf("tail should now stop") +) + +type Line struct { + Text string + Time time.Time + Err error // Error from tail +} + +// NewLine returns a Line with present time. +func NewLine(text string) *Line { + return &Line{text, time.Now(), nil} +} + +// SeekInfo represents arguments to `os.Seek` +type SeekInfo struct { + Offset int64 + Whence int // os.SEEK_* +} + +type logger interface { + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + Fatalln(v ...interface{}) + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) + Panicln(v ...interface{}) + Print(v ...interface{}) + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} + +// Config is used to specify how a file must be tailed. +type Config struct { + // File-specifc + Location *SeekInfo // Seek to this location before tailing + ReOpen bool // Reopen recreated files (tail -F) + MustExist bool // Fail early if the file does not exist + Poll bool // Poll for file changes instead of using inotify + Pipe bool // Is a named pipe (mkfifo) + RateLimiter *ratelimiter.LeakyBucket + + // Generic IO + Follow bool // Continue looking for new lines (tail -f) + MaxLineSize int // If non-zero, split longer lines into multiple lines + + // Logger, when nil, is set to tail.DefaultLogger + // To disable logging: set field to tail.DiscardingLogger + Logger logger +} + +type Tail struct { + Filename string + Lines chan *Line + Config + + file *os.File + reader *bufio.Reader + + watcher watch.FileWatcher + changes *watch.FileChanges + + tomb.Tomb // provides: Done, Kill, Dying + + lk sync.Mutex +} + +var ( + // DefaultLogger is used when Config.Logger == nil + DefaultLogger = log.New(os.Stderr, "", log.LstdFlags) + // DiscardingLogger can be used to disable logging output + DiscardingLogger = log.New(ioutil.Discard, "", 0) +) + +// TailFile begins tailing the file. Output stream is made available +// via the `Tail.Lines` channel. To handle errors during tailing, +// invoke the `Wait` or `Err` method after finishing reading from the +// `Lines` channel. +func TailFile(filename string, config Config) (*Tail, error) { + if config.ReOpen && !config.Follow { + util.Fatal("cannot set ReOpen without Follow.") + } + + t := &Tail{ + Filename: filename, + Lines: make(chan *Line), + Config: config, + } + + // when Logger was not specified in config, use default logger + if t.Logger == nil { + t.Logger = log.New(os.Stderr, "", log.LstdFlags) + } + + if t.Poll { + t.watcher = watch.NewPollingFileWatcher(filename) + } else { + t.watcher = watch.NewInotifyFileWatcher(filename) + } + + if t.MustExist { + var err error + t.file, err = OpenFile(t.Filename) + if err != nil { + return nil, err + } + } + + go t.tailFileSync() + + return t, nil +} + +// Return the file's current position, like stdio's ftell(). +// But this value is not very accurate. 
+// it may readed one line in the chan(tail.Lines), +// so it may lost one line. +func (tail *Tail) Tell() (offset int64, err error) { + if tail.file == nil { + return + } + offset, err = tail.file.Seek(0, os.SEEK_CUR) + if err != nil { + return + } + + tail.lk.Lock() + defer tail.lk.Unlock() + if tail.reader == nil { + return + } + + offset -= int64(tail.reader.Buffered()) + return +} + +// Stop stops the tailing activity. +func (tail *Tail) Stop() error { + tail.Kill(nil) + return tail.Wait() +} + +// StopAtEOF stops tailing as soon as the end of the file is reached. +func (tail *Tail) StopAtEOF() error { + tail.Kill(errStopAtEOF) + return tail.Wait() +} + +var errStopAtEOF = errors.New("tail: stop at eof") + +func (tail *Tail) close() { + close(tail.Lines) + tail.closeFile() +} + +func (tail *Tail) closeFile() { + if tail.file != nil { + tail.file.Close() + tail.file = nil + } +} + +func (tail *Tail) reopen() error { + tail.closeFile() + for { + var err error + tail.file, err = OpenFile(tail.Filename) + if err != nil { + if os.IsNotExist(err) { + tail.Logger.Printf("Waiting for %s to appear...", tail.Filename) + if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil { + if err == tomb.ErrDying { + return err + } + return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err) + } + continue + } + return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err) + } + break + } + return nil +} + +func (tail *Tail) readLine() (string, error) { + tail.lk.Lock() + line, err := tail.reader.ReadString('\n') + tail.lk.Unlock() + if err != nil { + // Note ReadString "returns the data read before the error" in + // case of an error, including EOF, so we return it as is. The + // caller is expected to process it if err is EOF. + return line, err + } + + line = strings.TrimRight(line, "\n") + + return line, err +} + +func (tail *Tail) tailFileSync() { + defer tail.Done() + defer tail.close() + + if !tail.MustExist { + // deferred first open. + err := tail.reopen() + if err != nil { + if err != tomb.ErrDying { + tail.Kill(err) + } + return + } + } + + // Seek to requested location on first open of the file. + if tail.Location != nil { + _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence) + tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location) + if err != nil { + tail.Killf("Seek error on %s: %s", tail.Filename, err) + return + } + } + + tail.openReader() + + var offset int64 = 0 + var err error + + // Read line by line. + for { + // do not seek in named pipes + if !tail.Pipe { + // grab the position in case we need to back up in the event of a half-line + offset, err = tail.Tell() + if err != nil { + tail.Kill(err) + return + } + } + + line, err := tail.readLine() + + // Process `line` even if err is EOF. + if err == nil { + cooloff := !tail.sendLine(line) + if cooloff { + // Wait a second before seeking till the end of + // file when rate limit is reached. 
+ msg := fmt.Sprintf( + "Too much log activity; waiting a second " + + "before resuming tailing") + tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)} + select { + case <-time.After(time.Second): + case <-tail.Dying(): + return + } + if err := tail.seekEnd(); err != nil { + tail.Kill(err) + return + } + } + } else if err == io.EOF { + if !tail.Follow { + if line != "" { + tail.sendLine(line) + } + return + } + + if tail.Follow && line != "" { + // this has the potential to never return the last line if + // it's not followed by a newline; seems a fair trade here + err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0}) + if err != nil { + tail.Kill(err) + return + } + } + + // When EOF is reached, wait for more data to become + // available. Wait strategy is based on the `tail.watcher` + // implementation (inotify or polling). + err := tail.waitForChanges() + if err != nil { + if err != ErrStop { + tail.Kill(err) + } + return + } + } else { + // non-EOF error + tail.Killf("Error reading %s: %s", tail.Filename, err) + return + } + + select { + case <-tail.Dying(): + if tail.Err() == errStopAtEOF { + continue + } + return + default: + } + } +} + +// waitForChanges waits until the file has been appended, deleted, +// moved or truncated. When moved or deleted - the file will be +// reopened if ReOpen is true. Truncated files are always reopened. +func (tail *Tail) waitForChanges() error { + if tail.changes == nil { + pos, err := tail.file.Seek(0, os.SEEK_CUR) + if err != nil { + return err + } + tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos) + if err != nil { + return err + } + } + + select { + case <-tail.changes.Modified: + return nil + case <-tail.changes.Deleted: + tail.changes = nil + if tail.ReOpen { + // XXX: we must not log from a library. + tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename) + if err := tail.reopen(); err != nil { + return err + } + tail.Logger.Printf("Successfully reopened %s", tail.Filename) + tail.openReader() + return nil + } else { + tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename) + return ErrStop + } + case <-tail.changes.Truncated: + // Always reopen truncated files (Follow is true) + tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename) + if err := tail.reopen(); err != nil { + return err + } + tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename) + tail.openReader() + return nil + case <-tail.Dying(): + return ErrStop + } + panic("unreachable") +} + +func (tail *Tail) openReader() { + if tail.MaxLineSize > 0 { + // add 2 to account for newline characters + tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2) + } else { + tail.reader = bufio.NewReader(tail.file) + } +} + +func (tail *Tail) seekEnd() error { + return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END}) +} + +func (tail *Tail) seekTo(pos SeekInfo) error { + _, err := tail.file.Seek(pos.Offset, pos.Whence) + if err != nil { + return fmt.Errorf("Seek error on %s: %s", tail.Filename, err) + } + // Reset the read buffer whenever the file is re-seek'ed + tail.reader.Reset(tail.file) + return nil +} + +// sendLine sends the line(s) to Lines channel, splitting longer lines +// if necessary. Return false if rate limit is reached. 
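+// Lines longer than MaxLineSize are first split with util.PartitionString and
+// each piece is delivered to the channel as its own Line.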
+func (tail *Tail) sendLine(line string) bool { + now := time.Now() + lines := []string{line} + + // Split longer lines + if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize { + lines = util.PartitionString(line, tail.MaxLineSize) + } + + for _, line := range lines { + tail.Lines <- &Line{line, now, nil} + } + + if tail.Config.RateLimiter != nil { + ok := tail.Config.RateLimiter.Pour(uint16(len(lines))) + if !ok { + tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n", + tail.Filename) + return false + } + } + + return true +} + +// Cleanup removes inotify watches added by the tail package. This function is +// meant to be invoked from a process's exit handler. Linux kernel may not +// automatically remove inotify watches after the process exits. +func (tail *Tail) Cleanup() { + watch.Cleanup(tail.Filename) +} diff --git a/vendor/github.com/hpcloud/tail/tail_posix.go b/vendor/github.com/hpcloud/tail/tail_posix.go new file mode 100644 index 000000000..bc4dc3357 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/tail_posix.go @@ -0,0 +1,11 @@ +// +build linux darwin freebsd netbsd openbsd + +package tail + +import ( + "os" +) + +func OpenFile(name string) (file *os.File, err error) { + return os.Open(name) +} diff --git a/vendor/github.com/hpcloud/tail/tail_windows.go b/vendor/github.com/hpcloud/tail/tail_windows.go new file mode 100644 index 000000000..ef2cfca1b --- /dev/null +++ b/vendor/github.com/hpcloud/tail/tail_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package tail + +import ( + "github.com/hpcloud/tail/winfile" + "os" +) + +func OpenFile(name string) (file *os.File, err error) { + return winfile.OpenFile(name, os.O_RDONLY, 0) +} diff --git a/vendor/github.com/hpcloud/tail/util/util.go b/vendor/github.com/hpcloud/tail/util/util.go new file mode 100644 index 000000000..54151fe39 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/util/util.go @@ -0,0 +1,48 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package util + +import ( + "fmt" + "log" + "os" + "runtime/debug" +) + +type Logger struct { + *log.Logger +} + +var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)} + +// fatal is like panic except it displays only the current goroutine's stack. +func Fatal(format string, v ...interface{}) { + // https://github.com/hpcloud/log/blob/master/log.go#L45 + LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack())) + os.Exit(1) +} + +// partitionString partitions the string into chunks of given size, +// with the last chunk of variable size. 
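+// For example, PartitionString("abcde", 2) returns ["ab", "cd", "e"].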
+func PartitionString(s string, chunkSize int) []string { + if chunkSize <= 0 { + panic("invalid chunkSize") + } + length := len(s) + chunks := 1 + length/chunkSize + start := 0 + end := chunkSize + parts := make([]string, 0, chunks) + for { + if end > length { + end = length + } + parts = append(parts, s[start:end]) + if end == length { + break + } + start, end = end, end+chunkSize + } + return parts +} diff --git a/vendor/github.com/hpcloud/tail/watch/filechanges.go b/vendor/github.com/hpcloud/tail/watch/filechanges.go new file mode 100644 index 000000000..3ce5dcecb --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/filechanges.go @@ -0,0 +1,36 @@ +package watch + +type FileChanges struct { + Modified chan bool // Channel to get notified of modifications + Truncated chan bool // Channel to get notified of truncations + Deleted chan bool // Channel to get notified of deletions/renames +} + +func NewFileChanges() *FileChanges { + return &FileChanges{ + make(chan bool), make(chan bool), make(chan bool)} +} + +func (fc *FileChanges) NotifyModified() { + sendOnlyIfEmpty(fc.Modified) +} + +func (fc *FileChanges) NotifyTruncated() { + sendOnlyIfEmpty(fc.Truncated) +} + +func (fc *FileChanges) NotifyDeleted() { + sendOnlyIfEmpty(fc.Deleted) +} + +// sendOnlyIfEmpty sends on a bool channel only if the channel has no +// backlog to be read by other goroutines. This concurrency pattern +// can be used to notify other goroutines if and only if they are +// looking for it (i.e., subsequent notifications can be compressed +// into one). +func sendOnlyIfEmpty(ch chan bool) { + select { + case ch <- true: + default: + } +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify.go b/vendor/github.com/hpcloud/tail/watch/inotify.go new file mode 100644 index 000000000..4478f1e1a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify.go @@ -0,0 +1,128 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/hpcloud/tail/util" + + "gopkg.in/fsnotify.v1" + "gopkg.in/tomb.v1" +) + +// InotifyFileWatcher uses inotify to monitor file changes. +type InotifyFileWatcher struct { + Filename string + Size int64 +} + +func NewInotifyFileWatcher(filename string) *InotifyFileWatcher { + fw := &InotifyFileWatcher{filepath.Clean(filename), 0} + return fw +} + +func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + err := WatchCreate(fw.Filename) + if err != nil { + return err + } + defer RemoveWatchCreate(fw.Filename) + + // Do a real check now as the file might have been created before + // calling `WatchFlags` above. + if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) { + // file exists, or stat returned an error. 
+ return err + } + + events := Events(fw.Filename) + + for { + select { + case evt, ok := <-events: + if !ok { + return fmt.Errorf("inotify watcher has been closed") + } + evtName, err := filepath.Abs(evt.Name) + if err != nil { + return err + } + fwFilename, err := filepath.Abs(fw.Filename) + if err != nil { + return err + } + if evtName == fwFilename { + return nil + } + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + err := Watch(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + fw.Size = pos + + go func() { + defer RemoveWatch(fw.Filename) + + events := Events(fw.Filename) + + for { + prevSize := fw.Size + + var evt fsnotify.Event + var ok bool + + select { + case evt, ok = <-events: + if !ok { + return + } + case <-t.Dying(): + return + } + + switch { + case evt.Op&fsnotify.Remove == fsnotify.Remove: + fallthrough + + case evt.Op&fsnotify.Rename == fsnotify.Rename: + changes.NotifyDeleted() + return + + case evt.Op&fsnotify.Write == fsnotify.Write: + fi, err := os.Stat(fw.Filename) + if err != nil { + if os.IsNotExist(err) { + changes.NotifyDeleted() + return + } + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + fw.Size = fi.Size() + + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + } else { + changes.NotifyModified() + } + prevSize = fw.Size + } + } + }() + + return changes, nil +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go new file mode 100644 index 000000000..03be4275c --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go @@ -0,0 +1,260 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. 
+ +package watch + +import ( + "log" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/hpcloud/tail/util" + + "gopkg.in/fsnotify.v1" +) + +type InotifyTracker struct { + mux sync.Mutex + watcher *fsnotify.Watcher + chans map[string]chan fsnotify.Event + done map[string]chan bool + watchNums map[string]int + watch chan *watchInfo + remove chan *watchInfo + error chan error +} + +type watchInfo struct { + op fsnotify.Op + fname string +} + +func (this *watchInfo) isCreate() bool { + return this.op == fsnotify.Create +} + +var ( + // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used + shared *InotifyTracker + + // these are used to ensure the shared InotifyTracker is run exactly once + once = sync.Once{} + goRun = func() { + shared = &InotifyTracker{ + mux: sync.Mutex{}, + chans: make(map[string]chan fsnotify.Event), + done: make(map[string]chan bool), + watchNums: make(map[string]int), + watch: make(chan *watchInfo), + remove: make(chan *watchInfo), + error: make(chan error), + } + go shared.run() + } + + logger = log.New(os.Stderr, "", log.LstdFlags) +) + +// Watch signals the run goroutine to begin watching the input filename +func Watch(fname string) error { + return watch(&watchInfo{ + fname: fname, + }) +} + +// Watch create signals the run goroutine to begin watching the input filename +// if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate +func WatchCreate(fname string) error { + return watch(&watchInfo{ + op: fsnotify.Create, + fname: fname, + }) +} + +func watch(winfo *watchInfo) error { + // start running the shared InotifyTracker if not already running + once.Do(goRun) + + winfo.fname = filepath.Clean(winfo.fname) + shared.watch <- winfo + return <-shared.error +} + +// RemoveWatch signals the run goroutine to remove the watch for the input filename +func RemoveWatch(fname string) { + remove(&watchInfo{ + fname: fname, + }) +} + +// RemoveWatch create signals the run goroutine to remove the watch for the input filename +func RemoveWatchCreate(fname string) { + remove(&watchInfo{ + op: fsnotify.Create, + fname: fname, + }) +} + +func remove(winfo *watchInfo) { + // start running the shared InotifyTracker if not already running + once.Do(goRun) + + winfo.fname = filepath.Clean(winfo.fname) + shared.mux.Lock() + done := shared.done[winfo.fname] + if done != nil { + delete(shared.done, winfo.fname) + close(done) + } + + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. + fname = filepath.Dir(fname) + } + shared.watchNums[fname]-- + watchNum := shared.watchNums[fname] + if watchNum == 0 { + delete(shared.watchNums, fname) + } + shared.mux.Unlock() + + // If we were the last ones to watch this file, unsubscribe from inotify. + // This needs to happen after releasing the lock because fsnotify waits + // synchronously for the kernel to acknowledge the removal of the watch + // for this file, which causes us to deadlock if we still held the lock. + if watchNum == 0 { + shared.watcher.Remove(fname) + } + shared.remove <- winfo +} + +// Events returns a channel to which FileEvents corresponding to the input filename +// will be sent. This channel will be closed when removeWatch is called on this +// filename. +func Events(fname string) <-chan fsnotify.Event { + shared.mux.Lock() + defer shared.mux.Unlock() + + return shared.chans[fname] +} + +// Cleanup removes the watch for the input filename if necessary. 
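+// It is equivalent to calling RemoveWatch and is intended to be called from a
+// process's exit handler.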
+func Cleanup(fname string) { + RemoveWatch(fname) +} + +// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating +// a new Watcher if the previous Watcher was closed. +func (shared *InotifyTracker) addWatch(winfo *watchInfo) error { + shared.mux.Lock() + defer shared.mux.Unlock() + + if shared.chans[winfo.fname] == nil { + shared.chans[winfo.fname] = make(chan fsnotify.Event) + shared.done[winfo.fname] = make(chan bool) + } + + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. + fname = filepath.Dir(fname) + } + + // already in inotify watch + if shared.watchNums[fname] > 0 { + shared.watchNums[fname]++ + if winfo.isCreate() { + shared.watchNums[winfo.fname]++ + } + return nil + } + + err := shared.watcher.Add(fname) + if err == nil { + shared.watchNums[fname]++ + if winfo.isCreate() { + shared.watchNums[winfo.fname]++ + } + } + return err +} + +// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the +// corresponding events channel. +func (shared *InotifyTracker) removeWatch(winfo *watchInfo) { + shared.mux.Lock() + defer shared.mux.Unlock() + + ch := shared.chans[winfo.fname] + if ch == nil { + return + } + + delete(shared.chans, winfo.fname) + close(ch) + + if !winfo.isCreate() { + return + } + + shared.watchNums[winfo.fname]-- + if shared.watchNums[winfo.fname] == 0 { + delete(shared.watchNums, winfo.fname) + } +} + +// sendEvent sends the input event to the appropriate Tail. +func (shared *InotifyTracker) sendEvent(event fsnotify.Event) { + name := filepath.Clean(event.Name) + + shared.mux.Lock() + ch := shared.chans[name] + done := shared.done[name] + shared.mux.Unlock() + + if ch != nil && done != nil { + select { + case ch <- event: + case <-done: + } + } +} + +// run starts the goroutine in which the shared struct reads events from its +// Watcher's Event channel and sends the events to the appropriate Tail. +func (shared *InotifyTracker) run() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + util.Fatal("failed to create Watcher") + } + shared.watcher = watcher + + for { + select { + case winfo := <-shared.watch: + shared.error <- shared.addWatch(winfo) + + case winfo := <-shared.remove: + shared.removeWatch(winfo) + + case event, open := <-shared.watcher.Events: + if !open { + return + } + shared.sendEvent(event) + + case err, open := <-shared.watcher.Errors: + if !open { + return + } else if err != nil { + sysErr, ok := err.(*os.SyscallError) + if !ok || sysErr.Err != syscall.EINTR { + logger.Printf("Error in Watcher Error channel: %s", err) + } + } + } + } +} diff --git a/vendor/github.com/hpcloud/tail/watch/polling.go b/vendor/github.com/hpcloud/tail/watch/polling.go new file mode 100644 index 000000000..49491f21d --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/polling.go @@ -0,0 +1,118 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "os" + "runtime" + "time" + + "github.com/hpcloud/tail/util" + "gopkg.in/tomb.v1" +) + +// PollingFileWatcher polls the file for changes. 
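+// It is used when Config.Poll is set and re-checks the file every
+// POLL_DURATION (250ms by default).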
+type PollingFileWatcher struct { + Filename string + Size int64 +} + +func NewPollingFileWatcher(filename string) *PollingFileWatcher { + fw := &PollingFileWatcher{filename, 0} + return fw +} + +var POLL_DURATION time.Duration + +func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + for { + if _, err := os.Stat(fw.Filename); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + select { + case <-time.After(POLL_DURATION): + continue + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + origFi, err := os.Stat(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + var prevModTime time.Time + + // XXX: use tomb.Tomb to cleanly manage these goroutines. replace + // the fatal (below) with tomb's Kill. + + fw.Size = pos + + go func() { + prevSize := fw.Size + for { + select { + case <-t.Dying(): + return + default: + } + + time.Sleep(POLL_DURATION) + fi, err := os.Stat(fw.Filename) + if err != nil { + // Windows cannot delete a file if a handle is still open (tail keeps one open) + // so it gives access denied to anything trying to read it until all handles are released. + if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) { + // File does not exist (has been deleted). + changes.NotifyDeleted() + return + } + + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + + // File got moved/renamed? + if !os.SameFile(origFi, fi) { + changes.NotifyDeleted() + return + } + + // File got truncated? + fw.Size = fi.Size() + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + prevSize = fw.Size + continue + } + // File got bigger? + if prevSize > 0 && prevSize < fw.Size { + changes.NotifyModified() + prevSize = fw.Size + continue + } + prevSize = fw.Size + + // File was appended to (changed)? + modTime := fi.ModTime() + if modTime != prevModTime { + prevModTime = modTime + changes.NotifyModified() + } + } + }() + + return changes, nil +} + +func init() { + POLL_DURATION = 250 * time.Millisecond +} diff --git a/vendor/github.com/hpcloud/tail/watch/watch.go b/vendor/github.com/hpcloud/tail/watch/watch.go new file mode 100644 index 000000000..2e1783ef0 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/watch.go @@ -0,0 +1,20 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import "gopkg.in/tomb.v1" + +// FileWatcher monitors file-level events. +type FileWatcher interface { + // BlockUntilExists blocks until the file comes into existence. + BlockUntilExists(*tomb.Tomb) error + + // ChangeEvents reports on changes to a file, be it modification, + // deletion, renames or truncations. Returned FileChanges group of + // channels will be closed, thus become unusable, after a deletion + // or truncation event. + // In order to properly report truncations, ChangeEvents requires + // the caller to pass their current offset in the file. 
+ ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error) +} diff --git a/vendor/github.com/hpcloud/tail/winfile/winfile.go b/vendor/github.com/hpcloud/tail/winfile/winfile.go new file mode 100644 index 000000000..aa7e7bc5d --- /dev/null +++ b/vendor/github.com/hpcloud/tail/winfile/winfile.go @@ -0,0 +1,92 @@ +// +build windows + +package winfile + +import ( + "os" + "syscall" + "unsafe" +) + +// issue also described here +//https://codereview.appspot.com/8203043/ + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218 +func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + var access uint32 + switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + if mode&syscall.O_CREAT != 0 { + access |= syscall.GENERIC_WRITE + } + if mode&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE) + var sa *syscall.SecurityAttributes + if mode&syscall.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createmode = syscall.CREATE_NEW + case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createmode = syscall.CREATE_ALWAYS + case mode&syscall.O_CREAT == syscall.O_CREAT: + createmode = syscall.OPEN_ALWAYS + case mode&syscall.O_TRUNC == syscall.O_TRUNC: + createmode = syscall.TRUNCATE_EXISTING + default: + createmode = syscall.OPEN_EXISTING + } + h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0) + return h, e +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211 +func makeInheritSa() *syscall.SecurityAttributes { + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133 +func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) { + r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm)) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61 +func syscallMode(i os.FileMode) (o uint32) { + o |= uint32(i.Perm()) + if i&os.ModeSetuid != 0 { + o |= syscall.S_ISUID + } + if i&os.ModeSetgid != 0 { + o |= syscall.S_ISGID + } + if i&os.ModeSticky != 0 { + o |= syscall.S_ISVTX + } + // No mapping for Go's ModeTemporary (plan9 only). 
+ return +} diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore deleted file mode 100644 index 529c3412b..000000000 --- a/vendor/github.com/imdario/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index b13a50ed1..000000000 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907..000000000 --- a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. 
Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index d1cefa871..000000000 --- a/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,222 +0,0 @@ -# Mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -[![GoDoc][3]][4] -[![GoCard][5]][6] -[![Build Status][1]][2] -[![Coverage Status][7]][8] -[![Sourcegraph][9]][10] - -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://goreportcard.com/badge/imdario/mergo -[6]: https://goreportcard.com/report/github.com/imdario/mergo -[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[8]: https://coveralls.io/github/imdario/mergo?branch=master -[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[10]: https://sourcegraph.com/github.com/imdario/mergo?badge - -### Latest release - -[Release v0.3.4](https://github.com/imdario/mergo/releases/tag/v0.3.4). - -### Important note - -Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. - -If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. 
:heart_eyes: - -Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) -Donate using Liberapay - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - -## Installation - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. 
- -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). - -### Nice example - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v2 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransfomer struct { -} - -func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index 6ea38e636..3f5afa83a 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -72,6 +72,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf case reflect.Struct: srcMap := src.Interface().(map[string]interface{}) for key := range srcMap { + config.overwriteWithEmptyValue = true srcValue := srcMap[key] fieldName := changeInitialCase(key, unicode.ToUpper) dstElement := dst.FieldByName(fieldName) diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 706b22069..f8de6c543 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -9,6 +9,7 @@ package mergo import ( + "fmt" "reflect" ) @@ -25,9 +26,10 @@ func hasExportedField(dst reflect.Value) (exported bool) { } type Config struct { - Overwrite bool - AppendSlice bool - Transformers Transformers + Overwrite bool + AppendSlice bool + Transformers Transformers + overwriteWithEmptyValue bool } type Transformers interface { @@ -39,6 +41,8 @@ type Transformers interface { // short circuiting on recursive types. func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { overwrite := config.Overwrite + overwriteWithEmptySrc := config.overwriteWithEmptyValue + config.overwriteWithEmptyValue = false if !src.IsValid() { return @@ -73,7 +77,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } @@ -124,15 +128,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { dstSlice = srcSlice } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } dstSlice = reflect.AppendSlice(dstSlice, srcSlice) } dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { + if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { continue } @@ -147,9 +154,12 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if !dst.CanSet() { break } - if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { dst.Set(src) } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } dst.Set(reflect.AppendSlice(dst, src)) } case reflect.Ptr: @@ -184,7 +194,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co return } default: - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (!isEmptyValue(src) 
|| overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d177..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. - -On a Windows machine, was the process invoked by someone double clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() (bool) diff --git a/vendor/github.com/jimstudt/http-authentication/basic/README.md b/vendor/github.com/jimstudt/http-authentication/basic/README.md deleted file mode 100644 index 96a2b0cf0..000000000 --- a/vendor/github.com/jimstudt/http-authentication/basic/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# auth/htpasswd [![GoDoc](https://godoc.org/github.com/jimstudt/http-authentication/basic?status.png)](http://godoc.org/github.com/jimstudt/http-authentication/basic) - -Authenticate using Apache-style htpasswd files and HTTP Basic Authentication. - -`htpasswd` has supported a number of different password hashing schemes over the -decades. Most common and sane ones are supported directly. For some you will need -to add another package. - -| Style | Status | -|-------|--------| -| plain | yes+ | -| md5 | yes | -| sha | yes | -| crypt | no (conflicts with plain) | -| bcrypt | no (can add with another package) | - -The standard set of systems will use *Plain*, *Sha*, and *MD5* systems while filtering out *bcrypt*. -Because of its complexity, *bcrypt* will be in another package which you can import and -add if you need it. *Plain* accepts both Apache style plain text and nginx style where the -password is preceded by {PLAIN}. - -## Usage - -~~~ go -import ( - "github.com/codegangsta/martini" - "github.com/jimstudt/http-authentication/basic" - "log" -) - -func main() { - m := martini.Classic() - - pw,err := basic.New("My Realm", "./my-htpasswd-file", htpasswd.DefaultSystems, nil) - if ( err != nil) { - log.Fatalf("Unable to read my htpassword file: %s", err.Error()) - } - - // authenticate every request - m.Use( pw.ServeHTTP) - - // You will also want to call pw.Reload(nil) to reprocess the password file when it changes. - - // You can use pw.ReloadOn( syscall.SIGHUP, nil ) to make it automatically - // reload on a HUP signal. - - // And those 'nil' arguments are where you pass a function to be notified about illegally - // formatted entries, or unsupported hash systems. See the API documents. - - // If you only want to authenticate some requests, then it goes like this... - // m.Get("/secure/thing", pw.ServeHTTP, myRealHandler) - // ... 
if pw.ServeHTTP does the 401 then your handler will not be called - - m.Run() - -} -~~~ - -## API Documentation - -The API is documented using godoc and also available at [godoc.org](http://godoc.org/github.com/jimstudt/http-authentication/basic) -~~~ - - - diff --git a/vendor/github.com/jinzhu/gorm/.codeclimate.yml b/vendor/github.com/jinzhu/gorm/.codeclimate.yml deleted file mode 100644 index 51aba50cb..000000000 --- a/vendor/github.com/jinzhu/gorm/.codeclimate.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -engines: - gofmt: - enabled: true - govet: - enabled: true - golint: - enabled: true -ratings: - paths: - - "**.go" diff --git a/vendor/github.com/jinzhu/gorm/.gitignore b/vendor/github.com/jinzhu/gorm/.gitignore deleted file mode 100644 index 01dc5ce07..000000000 --- a/vendor/github.com/jinzhu/gorm/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -documents -_book diff --git a/vendor/github.com/jinzhu/gorm/README.md b/vendor/github.com/jinzhu/gorm/README.md deleted file mode 100644 index 0c5c7ea6c..000000000 --- a/vendor/github.com/jinzhu/gorm/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# GORM - -The fantastic ORM library for Golang, aims to be developer friendly. - -[![go report card](https://goreportcard.com/badge/github.com/jinzhu/gorm "go report card")](https://goreportcard.com/report/github.com/jinzhu/gorm) -[![wercker status](https://app.wercker.com/status/8596cace912c9947dd9c8542ecc8cb8b/s/master "wercker status")](https://app.wercker.com/project/byKey/8596cace912c9947dd9c8542ecc8cb8b) -[![Join the chat at https://gitter.im/jinzhu/gorm](https://img.shields.io/gitter/room/jinzhu/gorm.svg)](https://gitter.im/jinzhu/gorm?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Open Collective Backer](https://opencollective.com/gorm/tiers/backer/badge.svg?label=backer&color=brightgreen "Open Collective Backer")](https://opencollective.com/gorm) -[![Open Collective Sponsor](https://opencollective.com/gorm/tiers/sponsor/badge.svg?label=sponsor&color=brightgreen "Open Collective Sponsor")](https://opencollective.com/gorm) -[![MIT license](http://img.shields.io/badge/license-MIT-brightgreen.svg)](http://opensource.org/licenses/MIT) -[![GoDoc](https://godoc.org/github.com/jinzhu/gorm?status.svg)](https://godoc.org/github.com/jinzhu/gorm) - -## Overview - -* Full-Featured ORM (almost) -* Associations (Has One, Has Many, Belongs To, Many To Many, Polymorphism) -* Hooks (Before/After Create/Save/Update/Delete/Find) -* Preloading (eager loading) -* Transactions -* Composite Primary Key -* SQL Builder -* Auto Migrations -* Logger -* Extendable, write Plugins based on GORM callbacks -* Every feature comes with tests -* Developer Friendly - -## Getting Started - -* GORM Guides [http://gorm.io](http://gorm.io) - -## Contributing - -[You can help to deliver a better GORM, check out things you can do](http://gorm.io/contribute.html) - -## License - -© Jinzhu, 2013~time.Now - -Released under the [MIT License](https://github.com/jinzhu/gorm/blob/master/License) diff --git a/vendor/github.com/jinzhu/gorm/docker-compose.yml b/vendor/github.com/jinzhu/gorm/docker-compose.yml deleted file mode 100644 index 79bf5fc39..000000000 --- a/vendor/github.com/jinzhu/gorm/docker-compose.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: '3' - -services: - mysql: - image: 'mysql:latest' - ports: - - 9910:3306 - environment: - - MYSQL_DATABASE=gorm - - MYSQL_USER=gorm - - MYSQL_PASSWORD=gorm - - MYSQL_RANDOM_ROOT_PASSWORD="yes" - postgres: - image: 'postgres:latest' - ports: - - 9920:5432 - environment: - - 
POSTGRES_USER=gorm - - POSTGRES_DB=gorm - - POSTGRES_PASSWORD=gorm - mssql: - image: 'mcmoe/mssqldocker:latest' - ports: - - 9930:1433 - environment: - - ACCEPT_EULA=Y - - SA_PASSWORD=LoremIpsum86 - - MSSQL_DB=gorm - - MSSQL_USER=gorm - - MSSQL_PASSWORD=LoremIpsum86 diff --git a/vendor/github.com/jinzhu/gorm/test_all.sh b/vendor/github.com/jinzhu/gorm/test_all.sh deleted file mode 100755 index 5cfb3321a..000000000 --- a/vendor/github.com/jinzhu/gorm/test_all.sh +++ /dev/null @@ -1,5 +0,0 @@ -dialects=("postgres" "mysql" "mssql" "sqlite") - -for dialect in "${dialects[@]}" ; do - DEBUG=false GORM_DIALECT=${dialect} go test -done diff --git a/vendor/github.com/jinzhu/gorm/wercker.yml b/vendor/github.com/jinzhu/gorm/wercker.yml deleted file mode 100644 index 0c3e73ef1..000000000 --- a/vendor/github.com/jinzhu/gorm/wercker.yml +++ /dev/null @@ -1,148 +0,0 @@ -# use the default golang container from Docker Hub -box: golang - -services: - - name: mariadb - id: mariadb:latest - env: - MYSQL_DATABASE: gorm - MYSQL_USER: gorm - MYSQL_PASSWORD: gorm - MYSQL_RANDOM_ROOT_PASSWORD: "yes" - - name: mysql57 - id: mysql:5.7 - env: - MYSQL_DATABASE: gorm - MYSQL_USER: gorm - MYSQL_PASSWORD: gorm - MYSQL_RANDOM_ROOT_PASSWORD: "yes" - - name: mysql56 - id: mysql:5.6 - env: - MYSQL_DATABASE: gorm - MYSQL_USER: gorm - MYSQL_PASSWORD: gorm - MYSQL_RANDOM_ROOT_PASSWORD: "yes" - - name: mysql55 - id: mysql:5.5 - env: - MYSQL_DATABASE: gorm - MYSQL_USER: gorm - MYSQL_PASSWORD: gorm - MYSQL_RANDOM_ROOT_PASSWORD: "yes" - - name: postgres - id: postgres:latest - env: - POSTGRES_USER: gorm - POSTGRES_PASSWORD: gorm - POSTGRES_DB: gorm - - name: postgres96 - id: postgres:9.6 - env: - POSTGRES_USER: gorm - POSTGRES_PASSWORD: gorm - POSTGRES_DB: gorm - - name: postgres95 - id: postgres:9.5 - env: - POSTGRES_USER: gorm - POSTGRES_PASSWORD: gorm - POSTGRES_DB: gorm - - name: postgres94 - id: postgres:9.4 - env: - POSTGRES_USER: gorm - POSTGRES_PASSWORD: gorm - POSTGRES_DB: gorm - - name: postgres93 - id: postgres:9.3 - env: - POSTGRES_USER: gorm - POSTGRES_PASSWORD: gorm - POSTGRES_DB: gorm - - name: mssql - id: mcmoe/mssqldocker:latest - env: - ACCEPT_EULA: Y - SA_PASSWORD: LoremIpsum86 - MSSQL_DB: gorm - MSSQL_USER: gorm - MSSQL_PASSWORD: LoremIpsum86 - -# The steps that will be executed in the build pipeline -build: - # The steps that will be executed on build - steps: - # Sets the go workspace and places you package - # at the right place in the workspace tree - - setup-go-workspace - - # Gets the dependencies - - script: - name: go get - code: | - cd $WERCKER_SOURCE_DIR - go version - go get -t ./... - - # Build the project - - script: - name: go build - code: | - go build ./... - - # Test the project - - script: - name: test sqlite - code: | - go test ./... - - - script: - name: test mariadb - code: | - GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mariadb:3306)/gorm?charset=utf8&parseTime=True" go test ./... - - - script: - name: test mysql5.7 - code: | - GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql57:3306)/gorm?charset=utf8&parseTime=True" go test ./... - - - script: - name: test mysql5.6 - code: | - GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql56:3306)/gorm?charset=utf8&parseTime=True" go test ./... - - - script: - name: test mysql5.5 - code: | - GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql55:3306)/gorm?charset=utf8&parseTime=True" go test ./... 
- - - script: - name: test postgres - code: | - GORM_DIALECT=postgres GORM_DSN="host=postgres user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test ./... - - - script: - name: test postgres96 - code: | - GORM_DIALECT=postgres GORM_DSN="host=postgres96 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test ./... - - - script: - name: test postgres95 - code: | - GORM_DIALECT=postgres GORM_DSN="host=postgres95 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test ./... - - - script: - name: test postgres94 - code: | - GORM_DIALECT=postgres GORM_DSN="host=postgres94 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test ./... - - - script: - name: test postgres93 - code: | - GORM_DIALECT=postgres GORM_DSN="host=postgres93 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test ./... - - - script: - name: test mssql - code: | - GORM_DIALECT=mssql GORM_DSN="sqlserver://gorm:LoremIpsum86@mssql:1433?database=gorm" go test ./... diff --git a/vendor/github.com/jinzhu/inflection/README.md b/vendor/github.com/jinzhu/inflection/README.md deleted file mode 100644 index a3de33614..000000000 --- a/vendor/github.com/jinzhu/inflection/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# Inflection - -Inflection pluralizes and singularizes English nouns - -[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930) - -## Basic Usage - -```go -inflection.Plural("person") => "people" -inflection.Plural("Person") => "People" -inflection.Plural("PERSON") => "PEOPLE" -inflection.Plural("bus") => "buses" -inflection.Plural("BUS") => "BUSES" -inflection.Plural("Bus") => "Buses" - -inflection.Singular("people") => "person" -inflection.Singular("People") => "Person" -inflection.Singular("PEOPLE") => "PERSON" -inflection.Singular("buses") => "bus" -inflection.Singular("BUSES") => "BUS" -inflection.Singular("Buses") => "Bus" - -inflection.Plural("FancyPerson") => "FancyPeople" -inflection.Singular("FancyPeople") => "FancyPerson" -``` - -## Register Rules - -Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb) - -If you want to register more rules, follow: - -``` -inflection.AddUncountable("fish") -inflection.AddIrregular("person", "people") -inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses" -inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS" -``` - -## Contributing - -You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do. - -## Author - -**jinzhu** - -* -* -* - -## License - -Released under the [MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/jinzhu/inflection/wercker.yml b/vendor/github.com/jinzhu/inflection/wercker.yml deleted file mode 100644 index 5e6ce981d..000000000 --- a/vendor/github.com/jinzhu/inflection/wercker.yml +++ /dev/null @@ -1,23 +0,0 @@ -box: golang - -build: - steps: - - setup-go-workspace - - # Gets the dependencies - - script: - name: go get - code: | - go get - - # Build the project - - script: - name: go build - code: | - go build ./... - - # Test the project - - script: - name: go test - code: | - go test ./... 
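The hunks that follow vendor joho/godotenv, newly pulled in by this update. For orientation, a minimal usage sketch based on the package's own doc comments (illustrative only, not part of the vendored files; SOME_ENV_VAR is the placeholder name used in those comments):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/joho/godotenv"
)

func main() {
	// Load reads .env from the working directory; variables already present
	// in the environment are left untouched (Overload forces the overwrite).
	if err := godotenv.Load(); err != nil {
		log.Println("no .env file loaded:", err)
	}
	fmt.Println(os.Getenv("SOME_ENV_VAR"))
}
```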
diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE new file mode 100644 index 000000000..e7ddd51be --- /dev/null +++ b/vendor/github.com/joho/godotenv/LICENCE @@ -0,0 +1,23 @@ +Copyright (c) 2013 John Barton + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go new file mode 100644 index 000000000..29b436c77 --- /dev/null +++ b/vendor/github.com/joho/godotenv/godotenv.go @@ -0,0 +1,346 @@ +// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) +// +// Examples/readme can be found on the github page at https://github.com/joho/godotenv +// +// The TL;DR is that you make a .env file that looks something like +// +// SOME_ENV_VAR=somevalue +// +// and then in your go code you can call +// +// godotenv.Load() +// +// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") +package godotenv + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "sort" + "strings" +) + +const doubleQuoteSpecialChars = "\\\n\r\"!$`" + +// Load will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Load without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Load("fileone", "filetwo") +// +// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults +func Load(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, false) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Overload will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Overload without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Overload("fileone", "filetwo") +// +// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. 
+func Overload(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, true) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (envMap map[string]string, err error) { + filenames = filenamesOrDefault(filenames) + envMap = make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := readFile(filename) + + if individualErr != nil { + err = individualErr + return // return early on a spazout + } + + for key, value := range individualEnvMap { + envMap[key] = value + } + } + + return +} + +// Parse reads an env file from io.Reader, returning a map of keys and values. +func Parse(r io.Reader) (envMap map[string]string, err error) { + envMap = make(map[string]string) + + var lines []string + scanner := bufio.NewScanner(r) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + if err = scanner.Err(); err != nil { + return + } + + for _, fullLine := range lines { + if !isIgnoredLine(fullLine) { + var key, value string + key, value, err = parseLine(fullLine, envMap) + + if err != nil { + return + } + envMap[key] = value + } + } + return +} + +//Unmarshal reads an env file from a string, returning a map of keys and values. +func Unmarshal(str string) (envMap map[string]string, err error) { + return Parse(strings.NewReader(str)) +} + +// Exec loads env vars from the specified filenames (empty map falls back to default) +// then executes the cmd specified. +// +// Simply hooks up os.Stdin/err/out to the command and calls Run() +// +// If you want more fine grained control over your command it's recommended +// that you use `Load()` or `Read()` and the `os/exec` package yourself. +func Exec(filenames []string, cmd string, cmdArgs []string) error { + Load(filenames...) + + command := exec.Command(cmd, cmdArgs...) + command.Stdin = os.Stdin + command.Stdout = os.Stdout + command.Stderr = os.Stderr + return command.Run() +} + +// Write serializes the given environment and writes it to a file +func Write(envMap map[string]string, filename string) error { + content, error := Marshal(envMap) + if error != nil { + return error + } + file, error := os.Create(filename) + if error != nil { + return error + } + _, err := file.WriteString(content) + return err +} + +// Marshal outputs the given environment as a dotenv-formatted environment file. +// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
+func Marshal(envMap map[string]string) (string, error) { + lines := make([]string, 0, len(envMap)) + for k, v := range envMap { + lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) + } + sort.Strings(lines) + return strings.Join(lines, "\n"), nil +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename) + if err != nil { + return err + } + + currentEnv := map[string]bool{} + rawEnv := os.Environ() + for _, rawEnvLine := range rawEnv { + key := strings.Split(rawEnvLine, "=")[0] + currentEnv[key] = true + } + + for key, value := range envMap { + if !currentEnv[key] || overload { + os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string) (envMap map[string]string, err error) { + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + + return Parse(file) +} + +func parseLine(line string, envMap map[string]string) (key string, value string, err error) { + if len(line) == 0 { + err = errors.New("zero length string") + return + } + + // ditch the comments (but keep quoted hashes) + if strings.Contains(line, "#") { + segmentsBetweenHashes := strings.Split(line, "#") + quotesAreOpen := false + var segmentsToKeep []string + for _, segment := range segmentsBetweenHashes { + if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { + if quotesAreOpen { + quotesAreOpen = false + segmentsToKeep = append(segmentsToKeep, segment) + } else { + quotesAreOpen = true + } + } + + if len(segmentsToKeep) == 0 || quotesAreOpen { + segmentsToKeep = append(segmentsToKeep, segment) + } + } + + line = strings.Join(segmentsToKeep, "#") + } + + firstEquals := strings.Index(line, "=") + firstColon := strings.Index(line, ":") + splitString := strings.SplitN(line, "=", 2) + if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { + //this is a yaml-style line + splitString = strings.SplitN(line, ":", 2) + } + + if len(splitString) != 2 { + err = errors.New("Can't separate key from value") + return + } + + // Parse the key + key = splitString[0] + if strings.HasPrefix(key, "export") { + key = strings.TrimPrefix(key, "export") + } + key = strings.Trim(key, " ") + + // Parse the value + value = parseValue(splitString[1], envMap) + return +} + +func parseValue(value string, envMap map[string]string) string { + + // trim + value = strings.Trim(value, " ") + + // check if we've got quoted values or possible escapes + if len(value) > 1 { + rs := regexp.MustCompile(`\A'(.*)'\z`) + singleQuotes := rs.FindStringSubmatch(value) + + rd := regexp.MustCompile(`\A"(.*)"\z`) + doubleQuotes := rd.FindStringSubmatch(value) + + if singleQuotes != nil || doubleQuotes != nil { + // pull the quotes off the edges + value = value[1 : len(value)-1] + } + + if doubleQuotes != nil { + // expand newlines + escapeRegex := regexp.MustCompile(`\\.`) + value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { + c := strings.TrimPrefix(match, `\`) + switch c { + case "n": + return "\n" + case "r": + return "\r" + default: + return match + } + }) + // unescape characters + e := regexp.MustCompile(`\\([^$])`) + value = e.ReplaceAllString(value, "$1") + } + + if singleQuotes == nil { + value = expandVariables(value, envMap) + } + } + + return value +} + +func expandVariables(v string, m map[string]string) string { + r := 
regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) + + return r.ReplaceAllStringFunc(v, func(s string) string { + submatch := r.FindStringSubmatch(s) + + if submatch == nil { + return s + } + if submatch[1] == "\\" || submatch[2] == "(" { + return submatch[0][1:] + } else if submatch[4] != "" { + return m[submatch[4]] + } + return s + }) +} + +func isIgnoredLine(line string) bool { + trimmedLine := strings.Trim(line, " \n\t") + return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") +} + +func doubleQuoteEscape(line string) string { + for _, c := range doubleQuoteSpecialChars { + toReplace := "\\" + string(c) + if c == '\n' { + toReplace = `\n` + } + if c == '\r' { + toReplace = `\r` + } + line = strings.Replace(line, string(c), toReplace, -1) + } + return line +} diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml deleted file mode 100644 index 955dc0be5..000000000 --- a/vendor/github.com/json-iterator/go/.codecov.yml +++ /dev/null @@ -1,3 +0,0 @@ -ignore: - - "output_tests/.*" - diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore deleted file mode 100644 index 15556530a..000000000 --- a/vendor/github.com/json-iterator/go/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/vendor -/bug_test.go -/coverage.txt -/.idea diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml deleted file mode 100644 index 449e67cd0..000000000 --- a/vendor/github.com/json-iterator/go/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.8.x - - 1.x - -before_install: - - go get -t -v ./... - -script: - - ./test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock deleted file mode 100644 index c8a9fbb38..000000000 --- a/vendor/github.com/json-iterator/go/Gopkg.lock +++ /dev/null @@ -1,21 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" - version = "1.0.0" - -[[projects]] - name = "github.com/modern-go/reflect2" - packages = ["."] - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml deleted file mode 100644 index 313a0f887..000000000 --- a/vendor/github.com/json-iterator/go/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - -ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] - -[[constraint]] - name = "github.com/modern-go/reflect2" - version = "1.0.1" diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md deleted file mode 100644 index 50d56ffbf..000000000 --- a/vendor/github.com/json-iterator/go/README.md +++ /dev/null @@ -1,87 +0,0 @@ -[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) -[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go) -[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) -[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) -[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) -[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) -[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) - -A high-performance 100% compatible drop-in replacement of "encoding/json" - -You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) - -# Benchmark - -![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) - -Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go - -Raw Result (easyjson requires static code generation) - -| | ns/op | allocation bytes | allocation times | -| --- | --- | --- | --- | -| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | -| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | -| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | -| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | -| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | -| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | - -Always benchmark with your own workload. -The result depends heavily on the data input. - -# Usage - -100% compatibility with standard lib - -Replace - -```go -import "encoding/json" -json.Marshal(&data) -``` - -with - -```go -import "github.com/json-iterator/go" - -var json = jsoniter.ConfigCompatibleWithStandardLibrary -json.Marshal(&data) -``` - -Replace - -```go -import "encoding/json" -json.Unmarshal(input, &data) -``` - -with - -```go -import "github.com/json-iterator/go" - -var json = jsoniter.ConfigCompatibleWithStandardLibrary -json.Unmarshal(input, &data) -``` - -[More documentation](http://jsoniter.com/migrate-from-go-std.html) - -# How to get - -``` -go get github.com/json-iterator/go -``` - -# Contribution Welcomed ! 
- -Contributors - -* [thockin](https://github.com/thockin) -* [mattn](https://github.com/mattn) -* [cch123](https://github.com/cch123) -* [Oleg Shaldybin](https://github.com/olegshaldybin) -* [Jason Toffaletti](https://github.com/toffaletti) - -Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh deleted file mode 100755 index b45ef6883..000000000 --- a/vendor/github.com/json-iterator/go/build.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e -set -x - -if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then - mkdir -p /tmp/build-golang/src/github.com/json-iterator - ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go -fi -export GOPATH=/tmp/build-golang -go get -u github.com/golang/dep/cmd/dep -cd /tmp/build-golang/src/github.com/json-iterator/go -exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md deleted file mode 100644 index 3095662b0..000000000 --- a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md +++ /dev/null @@ -1,7 +0,0 @@ -| json type \ dest type | bool | int | uint | float |string| -| --- | --- | --- | --- |--|--| -| number | positive => true
negative => true<br>zero => false| 23.2 => 23<br>-32.1 => -32| 12.1 => 12<br>-12.1 => 0|as normal|same as origin|
-| string | empty string => false<br>string "0" => false<br>other strings => true | "123.32" => 123<br>"-123.4" => -123<br>"123.23xxxw" => 123<br>"abcde12" => 0<br>"-32.1" => -32| 13.2 => 13<br>-1.1 => 0 |12.1 => 12.1<br>-12.3 => -12.3<br>12.4xxa => 12.4<br>+1.1e2 =>110 |same as origin|
-| bool | true => true<br>false => false| true => 1<br>false => 0 | true => 1<br>false => 0 |true => 1<br>false => 0|true => "true"<br>false => "false"|
-| object | true | 0 | 0 |0|originnal json|
-| array | empty array => false<br>nonempty array => true| [] => 0<br>[1,2] => 1 | [] => 0<br>[1,2] => 1 |[] => 0<br>
[1,2] => 1|original json| \ No newline at end of file diff --git a/vendor/github.com/json-iterator/go/test.sh b/vendor/github.com/json-iterator/go/test.sh deleted file mode 100755 index f4e7c0b2c..000000000 --- a/vendor/github.com/json-iterator/go/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list ./... | grep -v vendor); do - go test -coverprofile=profile.out -coverpkg=github.com/json-iterator/go $d - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/kiali/kiali/LICENSE b/vendor/github.com/kiali/kiali/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/kiali/kiali/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/kiali/kiali/business/apps.go b/vendor/github.com/kiali/kiali/business/apps.go new file mode 100644 index 000000000..468b93cc2 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/apps.go @@ -0,0 +1,216 @@ +package business + +import ( + "fmt" + "sync" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus" + "github.com/kiali/kiali/prometheus/internalmetrics" +) + +// AppService deals with fetching Workloads group by "app" label, which will be identified as an "application" +type AppService struct { + prom prometheus.ClientInterface + k8s kubernetes.IstioClientInterface +} + +// Temporal map of Workloads group by app label +type appsWorkload map[string][]*models.Workload + +// Helper method to build a map of workloads for a given labelSelector +func (in *AppService) fetchWorkloadsPerApp(namespace, labelSelector string) (appsWorkload, error) { + cfg := config.Get() + + ws, err := fetchWorkloads(in.k8s, namespace, labelSelector) + if err != nil { + return nil, err + } + + apps := make(appsWorkload) + for _, w := range ws { + if appLabel, ok := w.Labels[cfg.IstioLabels.AppLabelName]; ok { + apps[appLabel] = append(apps[appLabel], w) + } + } + return apps, nil +} + +// GetAppList is the API handler to fetch the list of applications in a given namespace +func (in *AppService) GetAppList(namespace string) (models.AppList, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "AppService", "GetAppList") + defer promtimer.ObserveNow(&err) + + appList := &models.AppList{ + Namespace: models.Namespace{Name: namespace}, + Apps: []models.AppListItem{}, + } + apps, err := fetchNamespaceApps(in.k8s, namespace, "") + if err != nil { + return *appList, err + } + + for keyApp, valueApp := range apps { + appItem := &models.AppListItem{Name: keyApp} + appItem.IstioSidecar = false + if len(valueApp.Workloads) > 0 { + appItem.IstioSidecar = true + } + for _, w := range valueApp.Workloads { + appItem.IstioSidecar = appItem.IstioSidecar && w.Pods.HasIstioSideCar() + } + (*appList).Apps = append((*appList).Apps, *appItem) + } + + return *appList, nil +} + +// GetApp is the API handler to fetch the details for a 
given namespace and app name +func (in *AppService) GetApp(namespace string, appName string) (models.App, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "AppService", "GetApp") + defer promtimer.ObserveNow(&err) + + appInstance := &models.App{Namespace: models.Namespace{Name: namespace}, Name: appName} + namespaceApps, err := fetchNamespaceApps(in.k8s, namespace, appName) + if err != nil { + return *appInstance, err + } + + var appDetails *appDetails + var ok bool + // Send a NewNotFound if the app is not found in the deployment list, instead to send an empty result + if appDetails, ok = namespaceApps[appName]; !ok { + return *appInstance, kubernetes.NewNotFound(appName, "Kiali", "App") + } + + (*appInstance).Workloads = make([]models.WorkloadItem, len(appDetails.Workloads)) + for i, wkd := range appDetails.Workloads { + wkdSvc := &models.WorkloadItem{WorkloadName: wkd.Name} + wkdSvc.IstioSidecar = wkd.Pods.HasIstioSideCar() + (*appInstance).Workloads[i] = *wkdSvc + } + + (*appInstance).ServiceNames = make([]string, len(appDetails.Services)) + for i, svc := range appDetails.Services { + (*appInstance).ServiceNames[i] = svc.Name + } + + in.fillCustomDashboardRefs(namespace, appInstance, appDetails) + + return *appInstance, nil +} + +// AppDetails holds Services and Workloads having the same "app" label +type appDetails struct { + app string + Services []v1.Service + Workloads models.Workloads +} + +// NamespaceApps is a map of app_name x AppDetails +type namespaceApps = map[string]*appDetails + +func castAppDetails(services []v1.Service, ws models.Workloads) namespaceApps { + allEntities := make(namespaceApps) + appLabel := config.Get().IstioLabels.AppLabelName + for _, service := range services { + if app, ok := service.Spec.Selector[appLabel]; ok { + if appEntities, ok := allEntities[app]; ok { + appEntities.Services = append(appEntities.Services, service) + } else { + allEntities[app] = &appDetails{ + app: app, + Services: []v1.Service{service}, + } + } + } + } + for _, w := range ws { + if app, ok := w.Labels[appLabel]; ok { + if appEntities, ok := allEntities[app]; ok { + appEntities.Workloads = append(appEntities.Workloads, w) + } else { + allEntities[app] = &appDetails{ + app: app, + Workloads: models.Workloads{w}, + } + } + } + } + return allEntities +} + +// Helper method to fetch all applications for a given namespace. +// Optionally if appName parameter is provided, it filters apps for that name. +// Return an error on any problem. 
+func fetchNamespaceApps(k8s kubernetes.IstioClientInterface, namespace string, appName string) (namespaceApps, error) { + var services []v1.Service + var ws models.Workloads + cfg := config.Get() + + labelSelector := cfg.IstioLabels.AppLabelName + if appName != "" { + labelSelector = fmt.Sprintf("%s=%s", cfg.IstioLabels.AppLabelName, appName) + } + + wg := sync.WaitGroup{} + wg.Add(2) + errChan := make(chan error, 2) + + go func() { + defer wg.Done() + var err error + services, err = k8s.GetServices(namespace, nil) + if appName != "" { + selector := labels.Set(map[string]string{cfg.IstioLabels.AppLabelName: appName}).AsSelector() + services = kubernetes.FilterServicesForSelector(selector, services) + } + if err != nil { + log.Errorf("Error fetching Services per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + go func() { + defer wg.Done() + var err error + ws, err = fetchWorkloads(k8s, namespace, labelSelector) + if err != nil { + log.Errorf("Error fetching Workload per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + wg.Wait() + if len(errChan) != 0 { + err := <-errChan + return nil, err + } + + return castAppDetails(services, ws), nil +} + +// fillCustomDashboardRefs finds all dashboard IDs and Titles associated to this app and add them to the model +func (in *AppService) fillCustomDashboardRefs(namespace string, app *models.App, details *appDetails) { + allPods := models.Pods{} + for _, workload := range details.Workloads { + allPods = append(allPods, workload.Pods...) + } + uniqueRefsList := getUniqueRuntimes(allPods) + mon, err := kubernetes.NewKialiMonitoringClient() + if err != nil { + // Do not fail the whole query, just log & return + log.Error("Cannot initialize Kiali Monitoring Client") + return + } + dash := NewDashboardsService(mon, in.prom) + app.Runtimes = dash.buildRuntimesList(namespace, uniqueRefsList) +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/checker.go b/vendor/github.com/kiali/kiali/business/checkers/checker.go new file mode 100644 index 000000000..7d33e0b1a --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/checker.go @@ -0,0 +1,11 @@ +package checkers + +import "github.com/kiali/kiali/models" + +type Checker interface { + Check() ([]*models.IstioCheck, bool) +} + +type GroupChecker interface { + Check() models.IstioValidations +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/destination_rules_checker.go b/vendor/github.com/kiali/kiali/business/checkers/destination_rules_checker.go new file mode 100644 index 000000000..47408e687 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/destination_rules_checker.go @@ -0,0 +1,27 @@ +package checkers + +import ( + "github.com/kiali/kiali/business/checkers/destinationrules" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +type DestinationRulesChecker struct { + DestinationRules []kubernetes.IstioObject + MTLSDetails kubernetes.MTLSDetails +} + +func (in DestinationRulesChecker) Check() models.IstioValidations { + validations := models.IstioValidations{} + + enabledDRCheckers := []GroupChecker{ + destinationrules.MultiMatchChecker{DestinationRules: in.DestinationRules}, + destinationrules.TrafficPolicyChecker{DestinationRules: in.DestinationRules, MTLSDetails: in.MTLSDetails}, + } + + for _, checker := range enabledDRCheckers { + validations = validations.MergeValidations(checker.Check()) + } + + return validations +} diff --git 
a/vendor/github.com/kiali/kiali/business/checkers/destinationrules/multi_match_checker.go b/vendor/github.com/kiali/kiali/business/checkers/destinationrules/multi_match_checker.go new file mode 100644 index 000000000..ff75ef964 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/destinationrules/multi_match_checker.go @@ -0,0 +1,150 @@ +package destinationrules + +import ( + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +const DestinationRulesCheckerType = "destinationrule" + +type MultiMatchChecker struct { + DestinationRules []kubernetes.IstioObject +} + +type subset struct { + Name string + RuleName string +} + +// Check validates that no two destinationRules target the same host+subset combination +func (m MultiMatchChecker) Check() models.IstioValidations { + validations := models.IstioValidations{} + + // Equality search is: [fqdn][subset] + seenHostSubsets := make(map[string]map[string]string) + + for _, dr := range m.DestinationRules { + if host, ok := dr.GetSpec()["host"]; ok { + destinationRulesName := dr.GetObjectMeta().Name + if dHost, ok := host.(string); ok { + fqdn := kubernetes.ParseHost(dHost, dr.GetObjectMeta().Namespace, dr.GetObjectMeta().ClusterName) + + // Skip DR validation if it enables mTLS either namespace or mesh-wide + if isNonLocalmTLSForServiceEnabled(dr, fqdn.Service) { + continue + } + + foundSubsets := extractSubsets(dr, destinationRulesName) + + if fqdn.Service == "*" { + // We need to check the matching subsets from all hosts now + for _, h := range seenHostSubsets { + checkCollisions(validations, destinationRulesName, foundSubsets, h) + } + // We add * later + } + // Search "*" first and then exact name + if previous, found := seenHostSubsets["*"]; found { + // Need to check subsets of "*" + checkCollisions(validations, destinationRulesName, foundSubsets, previous) + } + + if previous, found := seenHostSubsets[fqdn.Service]; found { + // Host found, need to check underlying subsets + checkCollisions(validations, destinationRulesName, foundSubsets, previous) + } + // Nothing threw an error, so add these + if _, found := seenHostSubsets[fqdn.Service]; !found { + seenHostSubsets[fqdn.Service] = make(map[string]string) + } + for _, s := range foundSubsets { + seenHostSubsets[fqdn.Service][s.Name] = destinationRulesName + } + } + } + } + + return validations +} + +func isNonLocalmTLSForServiceEnabled(dr kubernetes.IstioObject, service string) bool { + return service == "*" && ismTLSEnabled(dr) +} + +func ismTLSEnabled(dr kubernetes.IstioObject) bool { + if trafficPolicy, trafficPresent := dr.GetSpec()["trafficPolicy"]; trafficPresent { + if trafficCasted, ok := trafficPolicy.(map[string]interface{}); ok { + if tls, found := trafficCasted["tls"]; found { + if tlsCasted, ok := tls.(map[string]interface{}); ok { + if mode, found := tlsCasted["mode"]; found { + if modeCasted, ok := mode.(string); ok { + return modeCasted == "ISTIO_MUTUAL" + } + } + } + } + } + } + return false +} + +func extractSubsets(dr kubernetes.IstioObject, destinationRulesName string) []subset { + if subsets, found := dr.GetSpec()["subsets"]; found { + if subsetSlice, ok := subsets.([]interface{}); ok { + foundSubsets := make([]subset, 0, len(subsetSlice)) + for _, se := range subsetSlice { + if element, ok := se.(map[string]interface{}); ok { + if name, found := element["name"]; found { + if n, ok := name.(string); ok { + foundSubsets = append(foundSubsets, subset{n, destinationRulesName}) + } + } + } + } + return foundSubsets + } + } + // 
Matches all the subsets:~ + return []subset{subset{"~", destinationRulesName}} +} + +func checkCollisions(validations models.IstioValidations, destinationRulesName string, foundSubsets []subset, existing map[string]string) { + // If current subset is ~ + if len(foundSubsets) == 1 && foundSubsets[0].Name == "~" { + // This should match any subset in the same hostname + for _, v := range existing { + addError(validations, []string{destinationRulesName, v}) + } + } + + // If we have existing subset with ~ + if ruleName, found := existing["~"]; found { + addError(validations, []string{destinationRulesName, ruleName}) + } + + for _, s := range foundSubsets { + if ruleName, found := existing[s.Name]; found { + addError(validations, []string{destinationRulesName, ruleName}) + } + } +} + +func addError(validations models.IstioValidations, destinationRuleNames []string) models.IstioValidations { + for _, destinationRuleName := range destinationRuleNames { + key := models.IstioValidationKey{Name: destinationRuleName, ObjectType: DestinationRulesCheckerType} + checks := models.Build("destinationrules.multimatch", "spec/host") + rrValidation := &models.IstioValidation{ + Name: destinationRuleName, + ObjectType: DestinationRulesCheckerType, + Valid: true, + Checks: []*models.IstioCheck{ + &checks, + }, + } + + if _, exists := validations[key]; !exists { + validations.MergeValidations(models.IstioValidations{key: rrValidation}) + } + } + return validations +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/destinationrules/no_dest_checker.go b/vendor/github.com/kiali/kiali/business/checkers/destinationrules/no_dest_checker.go new file mode 100644 index 000000000..94afe1491 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/destinationrules/no_dest_checker.go @@ -0,0 +1,116 @@ +package destinationrules + +import ( + "strconv" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +type NoDestinationChecker struct { + Namespace string + WorkloadList models.WorkloadList + DestinationRule kubernetes.IstioObject + ServiceEntries map[string][]string + ServiceNames []string +} + +// Check parses the DestinationRule definitions and verifies that they point to an existing service, including any subset definitions +func (n NoDestinationChecker) Check() ([]*models.IstioCheck, bool) { + valid := true + validations := make([]*models.IstioCheck, 0) + + if host, ok := n.DestinationRule.GetSpec()["host"]; ok { + if dHost, ok := host.(string); ok { + fqdn := kubernetes.ParseHost(dHost, n.DestinationRule.GetObjectMeta().Namespace, n.DestinationRule.GetObjectMeta().ClusterName) + if !n.hasMatchingService(fqdn.Service, dHost) { + validation := models.Build("destinationrules.nodest.matchingworkload", "spec/host") + validations = append(validations, &validation) + valid = false + } + if subsets, ok := n.DestinationRule.GetSpec()["subsets"]; ok { + if dSubsets, ok := subsets.([]interface{}); ok { + // Check that each subset has a matching workload somewhere.. 
+ for i, subset := range dSubsets { + if innerSubset, ok := subset.(map[string]interface{}); ok { + if labels, ok := innerSubset["labels"]; ok { + if dLabels, ok := labels.(map[string]interface{}); ok { + stringLabels := make(map[string]string, len(dLabels)) + for k, v := range dLabels { + if s, ok := v.(string); ok { + stringLabels[k] = s + } + } + if !n.hasMatchingWorkload(fqdn.Service, stringLabels) { + validation := models.Build("destinationrules.nodest.subsetlabels", + "spec/subsets["+strconv.Itoa(i)+"]") + validations = append(validations, &validation) + valid = false + } + } + } + } + } + + } + } + } + } + + return validations, valid +} + +func (n NoDestinationChecker) hasMatchingWorkload(service string, labels map[string]string) bool { + appLabel := config.Get().IstioLabels.AppLabelName + + // Check wildcard hosts + if service == "*" { + return true + } + + // Check workloads + for _, wl := range n.WorkloadList.Workloads { + if service == wl.Labels[appLabel] { + valid := true + for k, v := range labels { + wlv, found := wl.Labels[k] + if !found || wlv != v { + valid = false + break + } + } + if valid { + return true + } + } + } + return false +} + +func (n NoDestinationChecker) hasMatchingService(service, origHost string) bool { + appLabel := config.Get().IstioLabels.AppLabelName + + // Check wildcard hosts + if service == "*" { + return true + } + + // Check Workloads + for _, wl := range n.WorkloadList.Workloads { + if service == wl.Labels[appLabel] { + return true + } + } + // Check ServiceNames + for _, s := range n.ServiceNames { + if service == s { + return true + } + } + // Check ServiceEntries + if _, found := n.ServiceEntries[origHost]; found { + return true + } + return false +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/destinationrules/traffic_policy_checker.go b/vendor/github.com/kiali/kiali/business/checkers/destinationrules/traffic_policy_checker.go new file mode 100644 index 000000000..ef50d66b9 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/destinationrules/traffic_policy_checker.go @@ -0,0 +1,103 @@ +package destinationrules + +import ( + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +type TrafficPolicyChecker struct { + DestinationRules []kubernetes.IstioObject + MTLSDetails kubernetes.MTLSDetails +} + +func (t TrafficPolicyChecker) Check() models.IstioValidations { + validations := models.IstioValidations{} + + // When mTLS is not enabled, there is no validation to be added. + if !t.isNonLocalmTLSEnabled() { + return validations + } + + // Check whether DRs override mTLS. 
+ for _, dr := range t.DestinationRules { + if !hasTrafficPolicy(dr) || !hasTLSSettings(dr) { + check := models.Build("destinationrules.trafficpolicy.notlssettings", "spec/trafficPolicy") + key := models.BuildKey(DestinationRulesCheckerType, dr.GetObjectMeta().Name) + validation := buildDestinationRuleValidation(dr, check, true) + + if _, exists := validations[key]; !exists { + validations.MergeValidations(models.IstioValidations{key: validation}) + } + } + } + + return validations +} + +func (t TrafficPolicyChecker) isNonLocalmTLSEnabled() bool { + for _, dr := range t.MTLSDetails.DestinationRules { + if host, ok := dr.GetSpec()["host"]; ok { + if dHost, ok := host.(string); ok { + fqdn := kubernetes.ParseHost(dHost, dr.GetObjectMeta().Namespace, dr.GetObjectMeta().ClusterName) + if isNonLocalmTLSForServiceEnabled(dr, fqdn.Service) { + return true + } + } + } + } + return false +} + +func hasTrafficPolicy(dr kubernetes.IstioObject) bool { + _, trafficPresent := dr.GetSpec()["trafficPolicy"] + return trafficPresent +} + +func hasTLSSettings(dr kubernetes.IstioObject) bool { + return hasTrafficPolicyTLS(dr) || hasPortTLS(dr) +} + +// hasPortTLS returns true when there is one port that specifies any TLS settings +func hasPortTLS(dr kubernetes.IstioObject) bool { + if trafficPolicy, trafficPresent := dr.GetSpec()["trafficPolicy"]; trafficPresent { + if trafficCasted, ok := trafficPolicy.(map[string]interface{}); ok { + if portsSettings, found := trafficCasted["portLevelSettings"]; found { + if portsSettingsCasted, ok := portsSettings.([]interface{}); ok { + for _, portSettings := range portsSettingsCasted { + if portSettingsCasted, ok := portSettings.(map[string]interface{}); ok { + if _, found := portSettingsCasted["tls"]; found { + return true + } + } + } + } + } + } + } + return false +} + +// hasTrafficPolicyTLS returns true when there is a trafficPolicy specifying any tls mode +func hasTrafficPolicyTLS(dr kubernetes.IstioObject) bool { + if trafficPolicy, trafficPresent := dr.GetSpec()["trafficPolicy"]; trafficPresent { + if trafficCasted, ok := trafficPolicy.(map[string]interface{}); ok { + if _, found := trafficCasted["tls"]; found { + return true + } + } + } + return false +} + +func buildDestinationRuleValidation(dr kubernetes.IstioObject, checks models.IstioCheck, valid bool) *models.IstioValidation { + validation := &models.IstioValidation{ + Name: dr.GetObjectMeta().Name, + ObjectType: DestinationRulesCheckerType, + Valid: valid, + Checks: []*models.IstioCheck{ + &checks, + }, + } + + return validation +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/gateway_checker.go b/vendor/github.com/kiali/kiali/business/checkers/gateway_checker.go new file mode 100644 index 000000000..0d6164310 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/gateway_checker.go @@ -0,0 +1,51 @@ +package checkers + +import ( + "github.com/kiali/kiali/business/checkers/gateways" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +const GatewayCheckerType = "gateway" + +type GatewayChecker struct { + GatewaysPerNamespace [][]kubernetes.IstioObject + Namespace string +} + +// Check runs checks for the all namespaces actions as well as for the single namespace validations +func (g GatewayChecker) Check() models.IstioValidations { + // Multinamespace checkers + validations := gateways.MultiMatchChecker{ + GatewaysPerNamespace: g.GatewaysPerNamespace, + }.Check() + + // Single namespace + for _, nssGw := range g.GatewaysPerNamespace { + for _, gw := range 
nssGw { + if gw.GetObjectMeta().Namespace == g.Namespace { + validations.MergeValidations(runSingleChecks(gw)) + } + } + } + + return validations +} + +func runSingleChecks(gw kubernetes.IstioObject) models.IstioValidations { + validations := models.IstioValidations{} + checks, valid := gateways.PortChecker{ + Gateway: gw, + }.Check() + + if !valid { + key := models.IstioValidationKey{ObjectType: GatewayCheckerType, Name: gw.GetObjectMeta().Name} + validations[key] = &models.IstioValidation{ + Name: key.Name, + ObjectType: key.ObjectType, + Checks: checks, + Valid: valid, + } + } + return validations +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/gateways/multi_match_checker.go b/vendor/github.com/kiali/kiali/business/checkers/gateways/multi_match_checker.go new file mode 100644 index 000000000..e7d8b5d09 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/gateways/multi_match_checker.go @@ -0,0 +1,135 @@ +package gateways + +import ( + "regexp" + "strconv" + "strings" + + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +type MultiMatchChecker struct { + GatewaysPerNamespace [][]kubernetes.IstioObject + existingList []Host +} + +const ( + GatewayCheckerType = "gateway" + wildCardMatch = "*" +) + +type Host struct { + Port int + Hostname string + ServerIndex int + HostIndex int + GatewayRuleName string +} + +// Check validates that no two gateways share the same host+port combination +func (m MultiMatchChecker) Check() models.IstioValidations { + validations := models.IstioValidations{} + m.existingList = make([]Host, 0) + + for _, nsG := range m.GatewaysPerNamespace { + for _, g := range nsG { + gatewayRuleName := g.GetObjectMeta().Name + if specServers, found := g.GetSpec()["servers"]; found { + if servers, ok := specServers.([]interface{}); ok { + for i, def := range servers { + if serverDef, ok := def.(map[string]interface{}); ok { + hosts := parsePortAndHostnames(serverDef) + for hi, host := range hosts { + host.ServerIndex = i + host.HostIndex = hi + host.GatewayRuleName = gatewayRuleName + duplicate, dhosts := m.findMatch(host) + if duplicate { + validations = addError(validations, gatewayRuleName, i, hi) + for _, dh := range dhosts { + validations = addError(validations, dh.GatewayRuleName, dh.ServerIndex, dh.HostIndex) + } + } + m.existingList = append(m.existingList, host) + } + } + } + } + } + } + } + return validations +} + +func addError(validations models.IstioValidations, gatewayRuleName string, serverIndex, hostIndex int) models.IstioValidations { + key := models.IstioValidationKey{Name: gatewayRuleName, ObjectType: GatewayCheckerType} + checks := models.Build("gateways.multimatch", + "spec/servers["+strconv.Itoa(serverIndex)+"]/hosts["+strconv.Itoa(hostIndex)+"]") + rrValidation := &models.IstioValidation{ + Name: gatewayRuleName, + ObjectType: GatewayCheckerType, + Valid: true, + Checks: []*models.IstioCheck{ + &checks, + }, + } + + if _, exists := validations[key]; !exists { + validations.MergeValidations(models.IstioValidations{key: rrValidation}) + } + return validations +} + +func parsePortAndHostnames(serverDef map[string]interface{}) []Host { + var port int + if portDef, found := serverDef["port"]; found { + if ports, ok := portDef.(map[string]interface{}); ok { + if numberDef, found := ports["number"]; found { + if portNumber, ok := numberDef.(int64); ok { + port = int(portNumber) + } + } + } + } + + if hostDef, found := serverDef["hosts"]; found { + if hostnames, ok := hostDef.([]interface{}); ok { + hosts 
:= make([]Host, 0, len(hostnames)) + for _, hostinterface := range hostnames { + if hostname, ok := hostinterface.(string); ok { + hosts = append(hosts, Host{ + Port: port, + Hostname: hostname, + }) + } + } + return hosts + } + } + return nil +} + +// findMatch uses a linear search with regexp to check for matching gateway host + port combinations. If this becomes a bottleneck for performance, replace with a graph or trie algorithm. +func (m MultiMatchChecker) findMatch(host Host) (bool, []Host) { + duplicates := make([]Host, 0) + for _, h := range m.existingList { + if h.Port == host.Port { + // wildcardMatches will always match + if host.Hostname == wildCardMatch || h.Hostname == wildCardMatch { + duplicates = append(duplicates, h) + break + } + + // Either one could include wildcards, so we need to check both ways and fix "*" -> ".*" for regexp engine + current := strings.Replace(host.Hostname, "*", ".*", -1) + previous := strings.Replace(h.Hostname, "*", ".*", -1) + + if regexp.MustCompile(current).MatchString(previous) || regexp.MustCompile(previous).MatchString(current) { + duplicates = append(duplicates, h) + break + } + } + } + return len(duplicates) > 0, duplicates +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/gateways/port_checker.go b/vendor/github.com/kiali/kiali/business/checkers/gateways/port_checker.go new file mode 100644 index 000000000..51f32dbe4 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/gateways/port_checker.go @@ -0,0 +1,34 @@ +package gateways + +import ( + "fmt" + + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +type PortChecker struct { + Gateway kubernetes.IstioObject +} + +func (p PortChecker) Check() ([]*models.IstioCheck, bool) { + validations := make([]*models.IstioCheck, 0) + + if serversSpec, found := p.Gateway.GetSpec()["servers"]; found { + if servers, ok := serversSpec.([]interface{}); ok { + for serverIndex, server := range servers { + if serverDef, ok := server.(map[string]interface{}); ok { + if portDef, found := serverDef["port"]; found { + if !kubernetes.ValidatePort(portDef) { + validation := models.Build("port.name.mismatch", + fmt.Sprintf("spec/servers[%d]/port/name", serverIndex)) + validations = append(validations, &validation) + } + } + } + } + } + } + + return validations, len(validations) == 0 +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/no_service_checker.go b/vendor/github.com/kiali/kiali/business/checkers/no_service_checker.go new file mode 100644 index 000000000..4528594c4 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/no_service_checker.go @@ -0,0 +1,109 @@ +package checkers + +import ( + "github.com/kiali/kiali/business/checkers/destinationrules" + "github.com/kiali/kiali/business/checkers/virtual_services" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" + v1 "k8s.io/api/core/v1" +) + +type NoServiceChecker struct { + Namespace string + IstioDetails *kubernetes.IstioDetails + Services []v1.Service + WorkloadList models.WorkloadList + GatewaysPerNamespace [][]kubernetes.IstioObject +} + +func (in NoServiceChecker) Check() models.IstioValidations { + validations := models.IstioValidations{} + + if in.IstioDetails == nil || in.Services == nil { + return validations + } + + log.Infof("ServiceEntries: %v\n", in.IstioDetails.ServiceEntries) + + serviceNames := getServiceNames(in.Services) + serviceHosts := kubernetes.ServiceEntryHostnames(in.IstioDetails.ServiceEntries) + 
gatewayNames := kubernetes.GatewayNames(in.GatewaysPerNamespace) + + for _, virtualService := range in.IstioDetails.VirtualServices { + validations.MergeValidations(runVirtualServiceCheck(virtualService, in.Namespace, serviceNames, serviceHosts)) + validations.MergeValidations(runGatewayCheck(virtualService, gatewayNames)) + } + for _, destinationRule := range in.IstioDetails.DestinationRules { + validations.MergeValidations(runDestinationRuleCheck(destinationRule, in.Namespace, in.WorkloadList, serviceNames, serviceHosts)) + } + + return validations +} + +func runVirtualServiceCheck(virtualService kubernetes.IstioObject, namespace string, serviceNames []string, serviceHosts map[string][]string) models.IstioValidations { + result, valid := virtual_services.NoHostChecker{ + Namespace: namespace, + ServiceNames: serviceNames, + VirtualService: virtualService, + ServiceEntryHosts: serviceHosts, + }.Check() + + istioObjectName := virtualService.GetObjectMeta().Name + key := models.IstioValidationKey{ObjectType: "virtualservice", Name: istioObjectName} + vsvalidations := models.IstioValidations{} + vsvalidations[key] = &models.IstioValidation{ + Name: istioObjectName, + ObjectType: "virtualservice", + Checks: result, + Valid: valid, + } + return vsvalidations +} + +func runGatewayCheck(virtualService kubernetes.IstioObject, gatewayNames map[string]struct{}) models.IstioValidations { + result, valid := virtual_services.NoGatewayChecker{ + VirtualService: virtualService, + GatewayNames: gatewayNames, + }.Check() + + istioObjectName := virtualService.GetObjectMeta().Name + key := models.IstioValidationKey{ObjectType: "virtualservice", Name: istioObjectName} + vsvalidations := models.IstioValidations{} + vsvalidations[key] = &models.IstioValidation{ + Name: istioObjectName, + ObjectType: "virtualservice", + Checks: result, + Valid: valid, + } + return vsvalidations +} + +func runDestinationRuleCheck(destinationRule kubernetes.IstioObject, namespace string, workloads models.WorkloadList, serviceNames []string, serviceHosts map[string][]string) models.IstioValidations { + result, valid := destinationrules.NoDestinationChecker{ + Namespace: namespace, + WorkloadList: workloads, + DestinationRule: destinationRule, + ServiceNames: serviceNames, + ServiceEntries: serviceHosts, + }.Check() + + istioObjectName := destinationRule.GetObjectMeta().Name + key := models.IstioValidationKey{ObjectType: "destinationrule", Name: istioObjectName} + drvalidations := models.IstioValidations{} + drvalidations[key] = &models.IstioValidation{ + Name: istioObjectName, + ObjectType: "destinationrule", + Checks: result, + Valid: valid, + } + return drvalidations +} + +func getServiceNames(services []v1.Service) []string { + serviceNames := make([]string, 0) + for _, item := range services { + serviceNames = append(serviceNames, item.Name) + } + return serviceNames +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/virtual_service_checker.go b/vendor/github.com/kiali/kiali/business/checkers/virtual_service_checker.go new file mode 100644 index 000000000..cdbfb8b9d --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/virtual_service_checker.go @@ -0,0 +1,80 @@ +package checkers + +import ( + "github.com/kiali/kiali/business/checkers/virtual_services" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +const VirtualCheckerType = "virtualservice" + +type VirtualServiceChecker struct { + Namespace string + DestinationRules []kubernetes.IstioObject + VirtualServices 
[]kubernetes.IstioObject +} + +// An Object Checker runs all checkers for a specific object type (e.g.: pod, route rule,...) +// It runs two kinds of checkers: +// 1. Individual checks: validating individual objects. +// 2. Group checks: validating behaviour between configurations. +func (in VirtualServiceChecker) Check() models.IstioValidations { + validations := models.IstioValidations{} + + validations = validations.MergeValidations(in.runIndividualChecks()) + validations = validations.MergeValidations(in.runGroupChecks()) + + return validations +} + +// Runs individual checks for each virtual service +func (in VirtualServiceChecker) runIndividualChecks() models.IstioValidations { + validations := models.IstioValidations{} + + for _, virtualService := range in.VirtualServices { + validations.MergeValidations(in.runChecks(virtualService)) + } + + return validations +} + +// runGroupChecks runs group checks for all virtual services +func (in VirtualServiceChecker) runGroupChecks() models.IstioValidations { + validations := models.IstioValidations{} + + enabledCheckers := []GroupChecker{ + virtual_services.SingleHostChecker{Namespace: in.Namespace, VirtualServices: in.VirtualServices}, + } + + for _, checker := range enabledCheckers { + validations = validations.MergeValidations(checker.Check()) + } + + return validations +} + +// runChecks runs all the individual checks for a single virtual service and appends the result into validations. +func (in VirtualServiceChecker) runChecks(virtualService kubernetes.IstioObject) models.IstioValidations { + virtualServiceName := virtualService.GetObjectMeta().Name + key := models.IstioValidationKey{Name: virtualServiceName, ObjectType: VirtualCheckerType} + rrValidation := &models.IstioValidation{ + Name: virtualServiceName, + ObjectType: VirtualCheckerType, + Valid: true, + // Explicitly create an empty array as 0-values do not appear in json + Checks: []*models.IstioCheck{}, + } + + enabledCheckers := []Checker{ + virtual_services.RouteChecker{Route: virtualService}, + virtual_services.SubsetPresenceChecker{Namespace: in.Namespace, DestinationRules: in.DestinationRules, VirtualService: virtualService}, + } + + for _, checker := range enabledCheckers { + checks, validChecker := checker.Check() + rrValidation.Checks = append(rrValidation.Checks, checks...)
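// Each individual checker returns its findings plus a validity flag; the checks are
// accumulated here and the overall Valid flag is AND-ed immediately below, so a single
// failing checker marks the whole VirtualService as invalid.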
+ rrValidation.Valid = rrValidation.Valid && validChecker + } + + return models.IstioValidations{key: rrValidation} +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/virtual_services/no_gateway_checker.go b/vendor/github.com/kiali/kiali/business/checkers/virtual_services/no_gateway_checker.go new file mode 100644 index 000000000..be9d5b4a8 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/virtual_services/no_gateway_checker.go @@ -0,0 +1,27 @@ +package virtual_services + +import ( + "strconv" + + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +type NoGatewayChecker struct { + VirtualService kubernetes.IstioObject + GatewayNames map[string]struct{} +} + +// Check validates that all the VirtualServices are pointing to an existing Gateway +func (s NoGatewayChecker) Check() ([]*models.IstioCheck, bool) { + validations := make([]*models.IstioCheck, 0) + + valid, index := kubernetes.ValidateVirtualServiceGateways(s.VirtualService.GetSpec(), s.GatewayNames, s.VirtualService.GetObjectMeta().Namespace, s.VirtualService.GetObjectMeta().ClusterName) + if !valid { + path := "spec/gateways[" + strconv.Itoa(index) + "]" + validation := models.Build("virtualservices.nogateway", path) + validations = append(validations, &validation) + } + + return validations, valid +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/virtual_services/no_host_checker.go b/vendor/github.com/kiali/kiali/business/checkers/virtual_services/no_host_checker.go new file mode 100644 index 000000000..8d8eeb232 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/virtual_services/no_host_checker.go @@ -0,0 +1,79 @@ +package virtual_services + +import ( + "fmt" + "strings" + + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +type NoHostChecker struct { + Namespace string + ServiceNames []string + VirtualService kubernetes.IstioObject + ServiceEntryHosts map[string][]string +} + +func (n NoHostChecker) Check() ([]*models.IstioCheck, bool) { + validations := make([]*models.IstioCheck, 0) + + routeProtocols := []string{"http", "tcp", "tls"} + countOfDefinedProtocols := 0 + for _, protocol := range routeProtocols { + if prot, ok := n.VirtualService.GetSpec()[protocol]; ok { + countOfDefinedProtocols++ + if aHttp, ok := prot.([]interface{}); ok { + for k, httpRoute := range aHttp { + if mHttpRoute, ok := httpRoute.(map[string]interface{}); ok { + if route, ok := mHttpRoute["route"]; ok { + if aDestinationWeight, ok := route.([]interface{}); ok { + for i, destination := range aDestinationWeight { + if !n.checkDestination(destination, protocol) { + validation := models.Build("virtualservices.nohost.hostnotfound", + fmt.Sprintf("spec/%s[%d]/route[%d]/destination/host", protocol, k, i)) + validations = append(validations, &validation) + } + } + } + } + } + } + } + } + } + + if countOfDefinedProtocols < 1 { + validation := models.Build("virtualservices.nohost.invalidprotocol", "") + validations = append(validations, &validation) + } + + return validations, len(validations) == 0 +} + +func (n NoHostChecker) checkDestination(destination interface{}, protocol string) bool { + if mDestination, ok := destination.(map[string]interface{}); ok { + if destinationW, ok := mDestination["destination"]; ok { + if mDestinationW, ok := destinationW.(map[string]interface{}); ok { + if host, ok := mDestinationW["host"]; ok { + if sHost, ok := host.(string); ok { + for _, service := range n.ServiceNames { + if kubernetes.FilterByHost(sHost, service, 
n.Namespace) { + return true + } + } + if protocols, found := n.ServiceEntryHosts[sHost]; found { + // We have ServiceEntry to check + for _, prot := range protocols { + if prot == strings.ToLower(protocol) { + return true + } + } + } + } + } + } + } + } + return false +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/virtual_services/route_checker.go b/vendor/github.com/kiali/kiali/business/checkers/virtual_services/route_checker.go new file mode 100644 index 000000000..65c471a8b --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/virtual_services/route_checker.go @@ -0,0 +1,114 @@ +package virtual_services + +import ( + "fmt" + "reflect" + + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/util/intutil" +) + +type RouteChecker struct { + Route kubernetes.IstioObject +} + +// Check returns both an array of IstioCheck and a boolean indicating if the current route rule is valid. +// The array of IstioChecks contains the result of running the following validations: +// 1. All weights with a numeric number. +// 2. All weights have value between 0 and 100. +// 3. Sum of all weights are 100 (if only one weight, then it assumes that is 100). +// 4. All the route has to have weight label. +func (route RouteChecker) Check() ([]*models.IstioCheck, bool) { + checks, valid := make([]*models.IstioCheck, 0), true + protocols := []string{"http", "tcp", "tls"} + + for _, protocol := range protocols { + cs, v := route.checkRoutesFor(protocol) + checks = append(checks, cs...) + valid = valid && v + } + + return checks, valid +} + +func (route RouteChecker) checkRoutesFor(kind string) ([]*models.IstioCheck, bool) { + validations := make([]*models.IstioCheck, 0) + weightSum, weightCount, valid := 0, 0, true + + http := route.Route.GetSpec()[kind] + if http == nil { + return validations, valid + } + + // Getting a []HTTPRoute + slice := reflect.ValueOf(http) + if slice.Kind() != reflect.Slice { + return validations, valid + } + + for routeIdx := 0; routeIdx < slice.Len(); routeIdx++ { + route, ok := slice.Index(routeIdx).Interface().(map[string]interface{}) + if !ok || route["route"] == nil { + continue + } + + weightCount, weightSum = 0, 0 + + // Getting a []DestinationWeight + destinationWeights := reflect.ValueOf(route["route"]) + if destinationWeights.Kind() != reflect.Slice { + return validations, valid + } + + for destWeightIdx := 0; destWeightIdx < destinationWeights.Len(); destWeightIdx++ { + destinationWeight, ok := destinationWeights.Index(destWeightIdx).Interface().(map[string]interface{}) + if !ok || destinationWeight["weight"] == nil { + continue + } + + weightCount = weightCount + 1 + weight, err := intutil.Convert(destinationWeight["weight"]) + if err != nil { + valid = false + path := fmt.Sprintf("spec/%s[%d]/route[%d]/weight/%s", + kind, routeIdx, destWeightIdx, destinationWeight["weight"]) + validation := buildValidation("virtualservices.route.numericweight", path) + validations = append(validations, &validation) + } + + if weight > 100 || weight < 0 { + valid = false + path := fmt.Sprintf("spec/%s[%d]/route[%d]/weight/%d", + kind, routeIdx, destWeightIdx, weight) + validation := buildValidation("virtualservices.route.weightrange", path) + validations = append(validations, &validation) + } + + weightSum = weightSum + weight + } + + if weightCount > 0 && weightSum != 100 { + valid = false + path := fmt.Sprintf("spec/%s[%d]/route", kind, routeIdx) + validation := 
buildValidation("virtualservices.route.weightsum", path) + validations = append(validations, &validation) + if weightCount != destinationWeights.Len() { + valid = false + path := fmt.Sprintf("spec/%s[%d]/route", kind, routeIdx) + validation := buildValidation("virtualservices.route.allweightspresent", path) + validations = append(validations, &validation) + } + } + } + + return validations, valid +} + +func buildValidation(checkId string, path string) models.IstioCheck { + validation := models.Build(checkId, path) + log.Infof("%s Galley should be performing this validation but it isn't. "+ + "Make sure Galley is fully working.", checkId) + return validation +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/virtual_services/single_host_checker.go b/vendor/github.com/kiali/kiali/business/checkers/virtual_services/single_host_checker.go new file mode 100644 index 000000000..724bd3f07 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/virtual_services/single_host_checker.go @@ -0,0 +1,151 @@ +package virtual_services + +import ( + "reflect" + "strings" + + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +type SingleHostChecker struct { + Namespace string + VirtualServices []kubernetes.IstioObject +} + +type Host struct { + Service string + Namespace string + Cluster string +} + +func (in SingleHostChecker) Check() models.IstioValidations { + hostCounter := make(map[string]map[string]map[string][]*kubernetes.IstioObject) + validations := models.IstioValidations{} + + for _, vs := range in.VirtualServices { + for _, host := range getHost(vs) { + storeHost(hostCounter, vs, host) + } + } + + for _, clusterCounter := range hostCounter { + for _, namespaceCounter := range clusterCounter { + isNamespaceWildcard := len(namespaceCounter["*"]) > 0 + for _, serviceCounter := range namespaceCounter { + targetSameHost := len(serviceCounter) > 1 + otherServiceHosts := len(namespaceCounter) > 1 + for _, virtualService := range serviceCounter { + // Marking virtualService as invalid if: + // - there is more than one virtual service per a host + // - there is one virtual service with wildcard and there are other virtual services pointing + // a host for that namespace + if targetSameHost || isNamespaceWildcard && otherServiceHosts { + if !hasGateways(virtualService) { + multipleVirtualServiceCheck(*virtualService, validations) + } + } + } + } + } + } + + return validations +} + +func multipleVirtualServiceCheck(virtualService kubernetes.IstioObject, validations models.IstioValidations) { + virtualServiceName := virtualService.GetObjectMeta().Name + key := models.IstioValidationKey{Name: virtualServiceName, ObjectType: "virtualservice"} + checks := models.Build("virtualservices.singlehost", "spec/hosts") + rrValidation := &models.IstioValidation{ + Name: virtualServiceName, + ObjectType: "virtualservice", + Valid: true, + Checks: []*models.IstioCheck{ + &checks, + }, + } + + validations.MergeValidations(models.IstioValidations{key: rrValidation}) +} + +func storeHost(hostCounter map[string]map[string]map[string][]*kubernetes.IstioObject, vs kubernetes.IstioObject, host Host) { + vsList := []*kubernetes.IstioObject{&vs} + + if hostCounter[host.Cluster] == nil { + hostCounter[host.Cluster] = map[string]map[string][]*kubernetes.IstioObject{ + host.Namespace: { + host.Service: vsList, + }, + } + } else if hostCounter[host.Cluster][host.Namespace] == nil { + hostCounter[host.Cluster][host.Namespace] = map[string][]*kubernetes.IstioObject{ + host.Service: 
vsList, + } + } else if _, ok := hostCounter[host.Cluster][host.Namespace][host.Service]; !ok { + hostCounter[host.Cluster][host.Namespace][host.Service] = vsList + } else { + hostCounter[host.Cluster][host.Namespace][host.Service] = append(hostCounter[host.Cluster][host.Namespace][host.Service], &vs) + + } +} + +func getHost(virtualService kubernetes.IstioObject) []Host { + hosts := virtualService.GetSpec()["hosts"] + if hosts == nil { + return []Host{} + } + + slice := reflect.ValueOf(hosts) + if slice.Kind() != reflect.Slice { + return []Host{} + } + + targetHosts := make([]Host, 0, slice.Len()) + + for hostIdx := 0; hostIdx < slice.Len(); hostIdx++ { + hostName, ok := slice.Index(hostIdx).Interface().(string) + if !ok { + continue + } + + targetHosts = append(targetHosts, formatHostForSearch(hostName, virtualService.GetObjectMeta().Namespace)) + } + + return targetHosts +} + +// Convert host to Host struct for searching +// e.g. reviews -> reviews, virtualService.Namespace, svc.cluster.local +// e.g. reviews.bookinfo.svc.cluster.local -> reviews, bookinfo, svc.cluster.local +// e.g. *.bookinfo.svc.cluster.local -> *, bookinfo, svc.cluster.local +// e.g. * -> *, *, * +func formatHostForSearch(hostName, virtualServiceNamespace string) Host { + domainParts := strings.Split(hostName, ".") + host := Host{} + + host.Service = domainParts[0] + if len(domainParts) > 1 { + host.Namespace = domainParts[1] + + if len(domainParts) > 2 { + host.Cluster = strings.Join(domainParts[2:], ".") + } + } else if host.Service != "*" { + host.Namespace = virtualServiceNamespace + host.Cluster = "svc.cluster.local" + } else if host.Service == "*" { + host.Namespace = "*" + host.Cluster = "*" + } + + return host +} + +func hasGateways(virtualService *kubernetes.IstioObject) bool { + if gateways, ok := (*virtualService).GetSpec()["gateways"]; ok { + vsGateways, ok := (gateways).([]interface{}) + return ok && vsGateways != nil && len(vsGateways) > 0 + } + return false +} diff --git a/vendor/github.com/kiali/kiali/business/checkers/virtual_services/subset_presence_checker.go b/vendor/github.com/kiali/kiali/business/checkers/virtual_services/subset_presence_checker.go new file mode 100644 index 000000000..67faca2ce --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/checkers/virtual_services/subset_presence_checker.go @@ -0,0 +1,140 @@ +package virtual_services + +import ( + "fmt" + "reflect" + "strings" + + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" +) + +type SubsetPresenceChecker struct { + Namespace string + DestinationRules []kubernetes.IstioObject + VirtualService kubernetes.IstioObject +} + +func (checker SubsetPresenceChecker) Check() ([]*models.IstioCheck, bool) { + valid := true + validations := make([]*models.IstioCheck, 0) + + protocols := [3]string{"http", "tcp", "tls"} + for _, protocol := range protocols { + specProtocol := checker.VirtualService.GetSpec()[protocol] + if specProtocol == nil { + continue + } + + // Getting a []HTTPRoute, []TLSRoute, []TCPRoute + slice := reflect.ValueOf(specProtocol) + if slice.Kind() != reflect.Slice { + continue + } + + for routeIdx := 0; routeIdx < slice.Len(); routeIdx++ { + httpRoute, ok := slice.Index(routeIdx).Interface().(map[string]interface{}) + if !ok || httpRoute["route"] == nil { + continue + } + + // Getting a []DestinationWeight + destinationWeights := reflect.ValueOf(httpRoute["route"]) + if destinationWeights.Kind() != reflect.Slice { + return validations, valid + } + + for destWeightIdx := 0; destWeightIdx < 
destinationWeights.Len(); destWeightIdx++ { + destinationWeight, ok := destinationWeights.Index(destWeightIdx).Interface().(map[string]interface{}) + if !ok || destinationWeight["destination"] == nil { + valid = false + path := fmt.Sprintf("spec/%s[%d]/route[%d]", protocol, routeIdx, destWeightIdx) + validation := models.Build("virtualservices.subsetpresent.destinationmandatory", path) + validations = append(validations, &validation) + continue + } + + destination, ok := destinationWeight["destination"].(map[string]interface{}) + if !ok { + continue + } + + host, ok := destination["host"].(string) + if !ok { + continue + } + + subset, ok := destination["subset"].(string) + if !ok { + continue + } + + if !checker.subsetPresent(host, subset) { + path := fmt.Sprintf("spec/%s[%d]/route[%d]/destination", protocol, routeIdx, destWeightIdx) + validation := models.Build("virtualservices.subsetpresent.subsetnotfound", path) + validations = append(validations, &validation) + } + } + } + } + + return validations, valid +} + +func (checker SubsetPresenceChecker) subsetPresent(host string, subset string) bool { + destinationRule, ok := checker.getDestinationRule(host) + if !ok || destinationRule == nil { + return false + } + + return hasSubsetDefined(destinationRule, subset) +} + +func (checker SubsetPresenceChecker) getDestinationRule(virtualServiceHost string) (kubernetes.IstioObject, bool) { + for _, destinationRule := range checker.DestinationRules { + host, ok := destinationRule.GetSpec()["host"] + if !ok { + continue + } + + sHost, ok := host.(string) + if !ok { + continue + } + + domainParts := strings.Split(sHost, ".") + serviceName := domainParts[0] + namespace := checker.Namespace + if len(domainParts) > 1 { + namespace = domainParts[1] + } + + if kubernetes.FilterByHost(virtualServiceHost, serviceName, namespace) { + return destinationRule, true + } + } + + return nil, false +} + +func hasSubsetDefined(destinationRule kubernetes.IstioObject, subsetTarget string) bool { + if subsets, ok := destinationRule.GetSpec()["subsets"]; ok { + if dSubsets, ok := subsets.([]interface{}); ok { + for _, subset := range dSubsets { + if innerSubset, ok := subset.(map[string]interface{}); ok { + if sSubsetName := innerSubset["name"]; ok { + subsetName := sSubsetName.(string) + if subsetName == subsetTarget { + if labels, ok := innerSubset["labels"]; ok { + if _, ok := labels.(map[string]interface{}); ok { + return true + } + } + } + } + } + } + } + } + return false +} diff --git a/vendor/github.com/kiali/kiali/business/dashboards.go b/vendor/github.com/kiali/kiali/business/dashboards.go new file mode 100644 index 000000000..68544c7da --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/dashboards.go @@ -0,0 +1,232 @@ +package business + +import ( + "fmt" + "strings" + "sync" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus" +) + +// DashboardsService deals with fetching dashboards from k8s client +type DashboardsService struct { + prom prometheus.ClientInterface + mon kubernetes.KialiMonitoringInterface +} + +// NewDashboardsService initializes this business service +func NewDashboardsService(mon kubernetes.KialiMonitoringInterface, prom prometheus.ClientInterface) DashboardsService { + return DashboardsService{prom: prom, mon: mon} +} + +func (in *DashboardsService) loadDashboardResource(namespace, template string) (*kubernetes.MonitoringDashboard, error) { + // There is an 
override mechanism with dashboards: default dashboards can be provided in Kiali namespace, + // and can be overriden in app namespace. + // So we look for the one in app namespace first, and only if not found fallback to the one in istio-system. + dashboard, err := in.mon.GetDashboard(namespace, template) + if err != nil { + cfg := config.Get() + dashboard, err = in.mon.GetDashboard(cfg.IstioNamespace, template) + if err != nil { + return nil, err + } + } + + return dashboard, nil +} + +// GetDashboard returns a dashboard filled-in with target data +func (in *DashboardsService) GetDashboard(params prometheus.CustomMetricsQuery, template string) (*models.MonitoringDashboard, error) { + dashboard, err := in.loadDashboardResource(params.Namespace, template) + if err != nil { + return nil, err + } + + aggLabels := models.ConvertAggregations(dashboard.Spec) + labels := fmt.Sprintf(`{namespace="%s",app="%s"`, params.Namespace, params.App) + if params.Version != "" { + labels += fmt.Sprintf(`,version="%s"`, params.Version) + } else { + // For app-based dashboards, we automatically add a possible aggregation/grouping over versions + versionsAgg := models.Aggregation{ + Label: "version", + DisplayName: "Version", + } + aggLabels = append([]models.Aggregation{versionsAgg}, aggLabels...) + } + labels += "}" + grouping := strings.Join(params.ByLabels, ",") + + wg := sync.WaitGroup{} + wg.Add(len(dashboard.Spec.Charts)) + filledCharts := make([]models.Chart, len(dashboard.Spec.Charts)) + + for i, c := range dashboard.Spec.Charts { + go func(idx int, chart kubernetes.MonitoringDashboardChart) { + defer wg.Done() + filledCharts[idx] = models.ConvertChart(chart) + if chart.DataType == kubernetes.Raw { + aggregator := params.RawDataAggregator + if chart.Aggregator != "" { + aggregator = chart.Aggregator + } + filledCharts[idx].Metric = in.prom.FetchRange(chart.MetricName, labels, grouping, aggregator, ¶ms.BaseMetricsQuery) + } else if chart.DataType == kubernetes.Rate { + filledCharts[idx].Metric = in.prom.FetchRateRange(chart.MetricName, labels, grouping, ¶ms.BaseMetricsQuery) + } else { + filledCharts[idx].Histogram = in.prom.FetchHistogramRange(chart.MetricName, labels, grouping, ¶ms.BaseMetricsQuery) + } + }(i, c) + } + + wg.Wait() + return &models.MonitoringDashboard{ + Title: dashboard.Spec.Title, + Charts: filledCharts, + Aggregations: aggLabels, + }, nil +} + +type istioChart struct { + models.Chart + refName string +} + +var istioCharts = []istioChart{ + { + Chart: models.Chart{ + Name: "Request volume", + Unit: "ops", + Spans: 6, + }, + refName: "request_count", + }, + { + Chart: models.Chart{ + Name: "Request duration", + Unit: "s", + Spans: 6, + }, + refName: "request_duration", + }, + { + Chart: models.Chart{ + Name: "Request size", + Unit: "B", + Spans: 6, + }, + refName: "request_size", + }, + { + Chart: models.Chart{ + Name: "Response size", + Unit: "B", + Spans: 6, + }, + refName: "response_size", + }, + { + Chart: models.Chart{ + Name: "TCP received", + Unit: "bps", + Spans: 6, + }, + refName: "tcp_received", + }, + { + Chart: models.Chart{ + Name: "TCP sent", + Unit: "bps", + Spans: 6, + }, + refName: "tcp_sent", + }, +} + +// GetIstioDashboard returns Istio dashboard (currently hard-coded) filled-in with metrics +func (in *DashboardsService) GetIstioDashboard(params prometheus.IstioMetricsQuery) (*models.MonitoringDashboard, error) { + var dashboard models.MonitoringDashboard + // Copy dashboard + if params.Direction == "inbound" { + dashboard = models.PrepareIstioDashboard("Inbound", 
"destination", "source") + } else { + dashboard = models.PrepareIstioDashboard("Outbound", "source", "destination") + } + + metrics := in.prom.GetMetrics(¶ms) + + for _, chartTpl := range istioCharts { + newChart := chartTpl.Chart + if metric, ok := metrics.Metrics[chartTpl.refName]; ok { + newChart.Metric = metric + } + if histo, ok := metrics.Histograms[chartTpl.refName]; ok { + newChart.Histogram = histo + } + dashboard.Charts = append(dashboard.Charts, newChart) + } + + return &dashboard, nil +} + +func (in *DashboardsService) buildRuntimesList(namespace string, templatesNames []string) []models.Runtime { + dashboards := make([]*kubernetes.MonitoringDashboard, len(templatesNames)) + wg := sync.WaitGroup{} + wg.Add(len(templatesNames)) + for idx, template := range templatesNames { + go func(i int, tpl string) { + defer wg.Done() + dashboard, err := in.loadDashboardResource(namespace, tpl) + if err != nil { + log.Errorf("Cannot get dashboard %s in namespace %s. Error was: %v", tpl, namespace, err) + } else { + dashboards[i] = dashboard + } + }(idx, template) + } + + wg.Wait() + + runtimes := []models.Runtime{} + for _, dashboard := range dashboards { + if dashboard == nil { + continue + } + runtime := getDashboardRuntime(dashboard) + ref := models.DashboardRef{ + Template: dashboard.Metadata["name"].(string), + Title: dashboard.Spec.Title, + } + found := false + for i := range runtimes { + rtObj := &runtimes[i] + if rtObj.Name == runtime { + rtObj.DashboardRefs = append(rtObj.DashboardRefs, ref) + found = true + break + } + } + if !found { + runtimes = append(runtimes, models.Runtime{ + Name: runtime, + DashboardRefs: []models.DashboardRef{ref}, + }) + } + } + return runtimes +} + +func getDashboardRuntime(dashboard *kubernetes.MonitoringDashboard) string { + if labels, ok := dashboard.Metadata["labels"]; ok { + if labelsMap, ok := labels.(map[string]interface{}); ok { + if runtime, ok := labelsMap["runtime"]; ok { + return runtime.(string) + } + } + } + return dashboard.Spec.Title +} diff --git a/vendor/github.com/kiali/kiali/business/health.go b/vendor/github.com/kiali/kiali/business/health.go new file mode 100644 index 000000000..0901517a0 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/health.go @@ -0,0 +1,305 @@ +package business + +import ( + "time" + + "github.com/prometheus/common/model" + "k8s.io/apimachinery/pkg/labels" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus" + "github.com/kiali/kiali/prometheus/internalmetrics" +) + +// HealthService deals with fetching health from various sources and convert to kiali model +type HealthService struct { + prom prometheus.ClientInterface + k8s kubernetes.IstioClientInterface +} + +// GetServiceHealth returns a service health (service request error rate) +func (in *HealthService) GetServiceHealth(namespace, service, rateInterval string, queryTime time.Time) (models.ServiceHealth, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "HealthService", "GetServiceHealth") + defer promtimer.ObserveNow(&err) + + rqHealth, err := in.getServiceRequestsHealth(namespace, service, rateInterval, queryTime) + return models.ServiceHealth{Requests: rqHealth}, err +} + +// GetAppHealth returns an app health from just Namespace and app name (thus, it fetches data from K8S and Prometheus) +func (in *HealthService) GetAppHealth(namespace, app, rateInterval string, queryTime time.Time) 
(models.AppHealth, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "HealthService", "GetAppHealth") + defer promtimer.ObserveNow(&err) + + appLabel := config.Get().IstioLabels.AppLabelName + + selectorLabels := make(map[string]string) + selectorLabels[appLabel] = app + labelSelector := labels.FormatLabels(selectorLabels) + + ws, err := fetchWorkloads(in.k8s, namespace, labelSelector) + if err != nil { + log.Errorf("Error fetching Workloads per namespace %s and app %s: %s", namespace, app, err) + return models.AppHealth{}, err + } + + return in.getAppHealth(namespace, app, rateInterval, queryTime, ws) +} + +func (in *HealthService) getAppHealth(namespace, app, rateInterval string, queryTime time.Time, ws models.Workloads) (models.AppHealth, error) { + health := models.EmptyAppHealth() + + // Perf: do not bother fetching request rate if not a single workload has sidecar + hasSidecar := false + for _, w := range ws { + if w.IstioSidecar { + hasSidecar = true + break + } + } + + // Fetch services requests rates + var errRate error + if hasSidecar { + rate, err := in.getAppRequestsHealth(namespace, app, rateInterval, queryTime) + health.Requests = rate + errRate = err + } + + // Deployment status + health.WorkloadStatuses = castWorkloadStatuses(ws) + + return health, errRate +} + +// GetWorkloadHealth returns a workload health from just Namespace and workload (thus, it fetches data from K8S and Prometheus) +func (in *HealthService) GetWorkloadHealth(namespace, workload, rateInterval string, queryTime time.Time) (models.WorkloadHealth, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "HealthService", "GetWorkloadHealth") + defer promtimer.ObserveNow(&err) + + w, err := fetchWorkload(in.k8s, namespace, workload) + if err != nil { + return models.WorkloadHealth{}, err + } + status := models.WorkloadStatus{ + Name: w.Name, + Replicas: w.Replicas, + AvailableReplicas: w.AvailableReplicas, + } + + // Perf: do not bother fetching request rate if workload has no sidecar + if !w.IstioSidecar { + return models.WorkloadHealth{ + WorkloadStatus: status, + Requests: models.NewEmptyRequestHealth(), + }, nil + } + + rate, err := in.getWorkloadRequestsHealth(namespace, workload, rateInterval, queryTime) + return models.WorkloadHealth{ + WorkloadStatus: status, + Requests: rate, + }, err +} + +// GetNamespaceAppHealth returns a health for all apps in given Namespace (thus, it fetches data from K8S and Prometheus) +func (in *HealthService) GetNamespaceAppHealth(namespace, rateInterval string, queryTime time.Time) (models.NamespaceAppHealth, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "HealthService", "GetNamespaceAppHealth") + defer promtimer.ObserveNow(&err) + + appEntities, err := fetchNamespaceApps(in.k8s, namespace, "") + if err != nil { + return nil, err + } + return in.getNamespaceAppHealth(namespace, appEntities, rateInterval, queryTime) +} + +func (in *HealthService) getNamespaceAppHealth(namespace string, appEntities namespaceApps, rateInterval string, queryTime time.Time) (models.NamespaceAppHealth, error) { + allHealth := make(models.NamespaceAppHealth) + + // Perf: do not bother fetching request rate if not a single workload has sidecar + hasSidecar := false + + // Prepare all data + for app, entities := range appEntities { + if app != "" { + h := models.EmptyAppHealth() + allHealth[app] = &h + if entities != nil { + h.WorkloadStatuses = castWorkloadStatuses(entities.Workloads) + 
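// Workload statuses are recorded unconditionally; the loop below only determines whether
// at least one workload carries an Istio sidecar, so the Prometheus request-rate query
// further down can be skipped entirely for namespaces with no injected workloads.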
for _, w := range entities.Workloads { + if w.IstioSidecar { + hasSidecar = true + break + } + } + } + } + } + + var errRate error + if hasSidecar { + // Fetch services requests rates + rates, err := in.prom.GetAllRequestRates(namespace, rateInterval, queryTime) + errRate = err + // Fill with collected request rates + fillAppRequestRates(allHealth, rates) + } + + return allHealth, errRate +} + +// GetNamespaceServiceHealth returns a health for all services in given Namespace (thus, it fetches data from K8S and Prometheus) +func (in *HealthService) GetNamespaceServiceHealth(namespace, rateInterval string, queryTime time.Time) (models.NamespaceServiceHealth, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "HealthService", "GetNamespaceServiceHealth") + defer promtimer.ObserveNow(&err) + + return in.getNamespaceServiceHealth(namespace, rateInterval, queryTime), nil +} + +func (in *HealthService) getNamespaceServiceHealth(namespace string, rateInterval string, queryTime time.Time) models.NamespaceServiceHealth { + allHealth := make(models.NamespaceServiceHealth) + + // Fetch services requests rates + rates, _ := in.prom.GetNamespaceServicesRequestRates(namespace, rateInterval, queryTime) + // Fill with collected request rates + lblDestSvc := model.LabelName("destination_service_name") + for _, sample := range rates { + service := string(sample.Metric[lblDestSvc]) + health, ok := allHealth[service] + if !ok { + health = &models.ServiceHealth{Requests: models.NewEmptyRequestHealth()} + allHealth[service] = health + } + health.Requests.AggregateInbound(sample) + } + + return allHealth +} + +// GetNamespaceWorkloadHealth returns a health for all workloads in given Namespace (thus, it fetches data from K8S and Prometheus) +func (in *HealthService) GetNamespaceWorkloadHealth(namespace, rateInterval string, queryTime time.Time) (models.NamespaceWorkloadHealth, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "HealthService", "GetNamespaceWorkloadHealth") + defer promtimer.ObserveNow(&err) + + wl, err := fetchWorkloads(in.k8s, namespace, "") + if err != nil { + return nil, err + } + + return in.getNamespaceWorkloadHealth(namespace, wl, rateInterval, queryTime), nil +} + +func (in *HealthService) getNamespaceWorkloadHealth(namespace string, ws models.Workloads, rateInterval string, queryTime time.Time) models.NamespaceWorkloadHealth { + // Perf: do not bother fetching request rate if not a single workload has sidecar + hasSidecar := false + + allHealth := make(models.NamespaceWorkloadHealth) + for _, w := range ws { + allHealth[w.Name] = &models.WorkloadHealth{} + allHealth[w.Name].WorkloadStatus = models.WorkloadStatus{ + Name: w.Name, + Replicas: w.Replicas, + AvailableReplicas: w.AvailableReplicas, + } + if w.IstioSidecar { + hasSidecar = true + } + } + + if hasSidecar { + // Fetch services requests rates + rates, _ := in.prom.GetAllRequestRates(namespace, rateInterval, queryTime) + // Fill with collected request rates + fillWorkloadRequestRates(allHealth, rates) + } + + return allHealth +} + +// fillAppRequestRates aggregates requests rates from metrics fetched from Prometheus, and stores the result in the health map. 
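// A worked example of the aggregation performed by the two fill*RequestRates helpers
// below: a single rate sample labelled {source_app="reviews", destination_app="ratings"}
// is counted as inbound traffic for the "ratings" entry and as outbound traffic for the
// "reviews" entry of the health map; samples whose source or destination app has no
// entry in the map (for instance apps outside the queried namespace) are simply ignored.
// The workload variant applies the same pattern to the *_workload labels.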
+func fillAppRequestRates(allHealth models.NamespaceAppHealth, rates model.Vector) { + lblDest := model.LabelName("destination_app") + lblSrc := model.LabelName("source_app") + for _, sample := range rates { + name := string(sample.Metric[lblDest]) + if health, ok := allHealth[name]; ok { + health.Requests.AggregateInbound(sample) + } + name = string(sample.Metric[lblSrc]) + if health, ok := allHealth[name]; ok { + health.Requests.AggregateOutbound(sample) + } + } +} + +// fillWorkloadRequestRates aggregates requests rates from metrics fetched from Prometheus, and stores the result in the health map. +func fillWorkloadRequestRates(allHealth models.NamespaceWorkloadHealth, rates model.Vector) { + lblDest := model.LabelName("destination_workload") + lblSrc := model.LabelName("source_workload") + for _, sample := range rates { + name := string(sample.Metric[lblDest]) + if health, ok := allHealth[name]; ok { + health.Requests.AggregateInbound(sample) + } + name = string(sample.Metric[lblSrc]) + if health, ok := allHealth[name]; ok { + health.Requests.AggregateOutbound(sample) + } + } +} + +func (in *HealthService) getServiceRequestsHealth(namespace, service, rateInterval string, queryTime time.Time) (models.RequestHealth, error) { + rqHealth := models.NewEmptyRequestHealth() + inbound, err := in.prom.GetServiceRequestRates(namespace, service, rateInterval, queryTime) + for _, sample := range inbound { + rqHealth.AggregateInbound(sample) + } + return rqHealth, err +} + +func (in *HealthService) getAppRequestsHealth(namespace, app, rateInterval string, queryTime time.Time) (models.RequestHealth, error) { + rqHealth := models.NewEmptyRequestHealth() + inbound, outbound, err := in.prom.GetAppRequestRates(namespace, app, rateInterval, queryTime) + for _, sample := range inbound { + rqHealth.AggregateInbound(sample) + } + for _, sample := range outbound { + rqHealth.AggregateOutbound(sample) + } + return rqHealth, err +} + +func (in *HealthService) getWorkloadRequestsHealth(namespace, workload, rateInterval string, queryTime time.Time) (models.RequestHealth, error) { + rqHealth := models.NewEmptyRequestHealth() + inbound, outbound, err := in.prom.GetWorkloadRequestRates(namespace, workload, rateInterval, queryTime) + for _, sample := range inbound { + rqHealth.AggregateInbound(sample) + } + for _, sample := range outbound { + rqHealth.AggregateOutbound(sample) + } + return rqHealth, err +} + +func castWorkloadStatuses(ws models.Workloads) []models.WorkloadStatus { + statuses := make([]models.WorkloadStatus, 0) + for _, w := range ws { + status := models.WorkloadStatus{ + Name: w.Name, + Replicas: w.Replicas, + AvailableReplicas: w.AvailableReplicas} + statuses = append(statuses, status) + + } + return statuses +} diff --git a/vendor/github.com/kiali/kiali/business/istio_config.go b/vendor/github.com/kiali/kiali/business/istio_config.go new file mode 100644 index 000000000..431bd1241 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/istio_config.go @@ -0,0 +1,722 @@ +package business + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "sync" + + errors2 "k8s.io/apimachinery/pkg/api/errors" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus/internalmetrics" +) + +type IstioConfigService struct { + k8s kubernetes.IstioClientInterface +} + +type IstioConfigCriteria struct { + Namespace string + IncludeGateways bool + IncludeVirtualServices bool + 
IncludeDestinationRules bool + IncludeServiceEntries bool + IncludeRules bool + IncludeAdapters bool + IncludeTemplates bool + IncludeQuotaSpecs bool + IncludeQuotaSpecBindings bool + IncludePolicies bool + IncludeMeshPolicies bool + IncludeClusterRbacConfigs bool + IncludeServiceRoles bool + IncludeServiceRoleBindings bool +} + +const ( + VirtualServices = "virtualservices" + DestinationRules = "destinationrules" + ServiceEntries = "serviceentries" + Gateways = "gateways" + Rules = "rules" + Adapters = "adapters" + Templates = "templates" + QuotaSpecs = "quotaspecs" + QuotaSpecBindings = "quotaspecbindings" + Policies = "policies" + MeshPolicies = "meshpolicies" + ClusterRbacConfigs = "clusterrbacconfigs" + ServiceRoles = "serviceroles" + ServiceRoleBindings = "servicerolebindings" +) + +var resourceTypesToAPI = map[string]string{ + DestinationRules: kubernetes.NetworkingGroupVersion.Group, + VirtualServices: kubernetes.NetworkingGroupVersion.Group, + ServiceEntries: kubernetes.NetworkingGroupVersion.Group, + Gateways: kubernetes.NetworkingGroupVersion.Group, + Adapters: kubernetes.ConfigGroupVersion.Group, + Templates: kubernetes.ConfigGroupVersion.Group, + Rules: kubernetes.ConfigGroupVersion.Group, + QuotaSpecs: kubernetes.ConfigGroupVersion.Group, + QuotaSpecBindings: kubernetes.ConfigGroupVersion.Group, + Policies: kubernetes.AuthenticationGroupVersion.Group, + MeshPolicies: kubernetes.AuthenticationGroupVersion.Group, + ClusterRbacConfigs: kubernetes.RbacGroupVersion.Group, + ServiceRoles: kubernetes.RbacGroupVersion.Group, + ServiceRoleBindings: kubernetes.RbacGroupVersion.Group, +} + +var apiToVersion = map[string]string{ + kubernetes.NetworkingGroupVersion.Group: kubernetes.ApiNetworkingVersion, + kubernetes.ConfigGroupVersion.Group: kubernetes.ApiConfigVersion, + kubernetes.ApiAuthenticationVersion: kubernetes.ApiAuthenticationVersion, + kubernetes.RbacGroupVersion.Group: kubernetes.ApiRbacVersion, +} + +const ( + MeshmTLSEnabled = "MESH_MTLS_ENABLED" + MeshmTLSPartiallyEnabled = "MESH_MTLS_PARTIALLY_ENABLED" + MeshmTLSNotEnabled = "MESH_MTLS_NOT_ENABLED" +) + +// GetIstioConfigList returns a list of Istio routing objects, Mixer Rules, (etc.) +// per a given Namespace. 
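// Illustrative sketch (not part of the vendored kiali source): GetIstioConfigList below
// fans out one goroutine per resource kind, each guarded by its Include* flag, and only
// inspects the collected errors once the WaitGroup has drained. Stripped of the Kiali
// types (fetchGateways/fetchVirtualServices are hypothetical stand-ins), the pattern is:
//
//	var wg sync.WaitGroup
//	var ggErr, vsErr error
//	wg.Add(2)
//	go func() {
//		defer wg.Done()
//		if criteria.IncludeGateways {
//			_, ggErr = fetchGateways()
//		}
//	}()
//	go func() {
//		defer wg.Done()
//		if criteria.IncludeVirtualServices {
//			_, vsErr = fetchVirtualServices()
//		}
//	}()
//	wg.Wait()
//	for _, e := range []error{ggErr, vsErr} {
//		if e != nil {
//			return models.IstioConfigList{}, e
//		}
//	}
//
// No mutex is needed for ggErr/vsErr: each goroutine writes only its own error variable,
// and wg.Wait() provides the happens-before edge before those variables are read.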
+func (in *IstioConfigService) GetIstioConfigList(criteria IstioConfigCriteria) (models.IstioConfigList, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "IstioConfigService", "GetIstioConfigList") + defer promtimer.ObserveNow(&err) + + if criteria.Namespace == "" { + return models.IstioConfigList{}, errors.New("GetIstioConfigList needs a non empty Namespace") + } + istioConfigList := models.IstioConfigList{ + Namespace: models.Namespace{Name: criteria.Namespace}, + Gateways: models.Gateways{}, + VirtualServices: models.VirtualServices{Items: []models.VirtualService{}}, + DestinationRules: models.DestinationRules{Items: []models.DestinationRule{}}, + ServiceEntries: models.ServiceEntries{}, + Rules: models.IstioRules{}, + Adapters: models.IstioAdapters{}, + Templates: models.IstioTemplates{}, + QuotaSpecs: models.QuotaSpecs{}, + QuotaSpecBindings: models.QuotaSpecBindings{}, + Policies: models.Policies{}, + MeshPolicies: models.MeshPolicies{}, + ClusterRbacConfigs: models.ClusterRbacConfigs{}, + ServiceRoles: models.ServiceRoles{}, + ServiceRoleBindings: models.ServiceRoleBindings{}, + } + var gg, vs, dr, se, qs, qb, aa, tt, mr, pc, mp, rc, sr, srb []kubernetes.IstioObject + var ggErr, vsErr, drErr, seErr, mrErr, qsErr, qbErr, aaErr, ttErr, pcErr, mpErr, rcErr, srErr, srbErr error + var wg sync.WaitGroup + wg.Add(14) + + go func() { + defer wg.Done() + if criteria.IncludeGateways { + if gg, ggErr = in.k8s.GetGateways(criteria.Namespace); ggErr == nil { + (&istioConfigList.Gateways).Parse(gg) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludeVirtualServices { + if vs, vsErr = in.k8s.GetVirtualServices(criteria.Namespace, ""); vsErr == nil { + (&istioConfigList.VirtualServices).Parse(vs) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludeDestinationRules { + if dr, drErr = in.k8s.GetDestinationRules(criteria.Namespace, ""); drErr == nil { + (&istioConfigList.DestinationRules).Parse(dr) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludeServiceEntries { + if se, seErr = in.k8s.GetServiceEntries(criteria.Namespace); seErr == nil { + (&istioConfigList.ServiceEntries).Parse(se) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludeRules { + if mr, mrErr = in.k8s.GetIstioRules(criteria.Namespace); mrErr == nil { + istioConfigList.Rules = models.CastIstioRulesCollection(mr) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludeAdapters { + if aa, aaErr = in.k8s.GetAdapters(criteria.Namespace); aaErr == nil { + istioConfigList.Adapters = models.CastIstioAdaptersCollection(aa) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludeTemplates { + if tt, ttErr = in.k8s.GetTemplates(criteria.Namespace); ttErr == nil { + istioConfigList.Templates = models.CastIstioTemplatesCollection(tt) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludeQuotaSpecs { + if qs, qsErr = in.k8s.GetQuotaSpecs(criteria.Namespace); qsErr == nil { + (&istioConfigList.QuotaSpecs).Parse(qs) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludeQuotaSpecBindings { + if qb, qbErr = in.k8s.GetQuotaSpecBindings(criteria.Namespace); qbErr == nil { + (&istioConfigList.QuotaSpecBindings).Parse(qb) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludePolicies { + if pc, pcErr = in.k8s.GetPolicies(criteria.Namespace); pcErr == nil { + (&istioConfigList.Policies).Parse(pc) + } + } + }() + + go func() { + defer wg.Done() + // MeshPolicies are not namespaced. 
They will be only listed for the namespace + // where istio is deployed. + if criteria.IncludeMeshPolicies && criteria.Namespace == config.Get().IstioNamespace { + if mp, mpErr = in.k8s.GetMeshPolicies(criteria.Namespace); mpErr == nil { + (&istioConfigList.MeshPolicies).Parse(mp) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludeClusterRbacConfigs && criteria.Namespace == config.Get().IstioNamespace { + if rc, rcErr = in.k8s.GetClusterRbacConfigs(criteria.Namespace); rcErr == nil { + (&istioConfigList.ClusterRbacConfigs).Parse(rc) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludeServiceRoles { + if sr, srErr = in.k8s.GetServiceRoles(criteria.Namespace); srErr == nil { + (&istioConfigList.ServiceRoles).Parse(sr) + } + } + }() + + go func() { + defer wg.Done() + if criteria.IncludeServiceRoleBindings { + if srb, srbErr = in.k8s.GetServiceRoleBindings(criteria.Namespace); srbErr == nil { + (&istioConfigList.ServiceRoleBindings).Parse(srb) + } + } + }() + + wg.Wait() + + for _, genErr := range []error{ggErr, vsErr, drErr, seErr, mrErr, qsErr, qbErr, aaErr, ttErr, mpErr, pcErr, rcErr, srErr, srbErr} { + if genErr != nil { + err = genErr + return models.IstioConfigList{}, err + } + } + + return istioConfigList, nil +} + +// GetIstioConfigDetails returns a specific Istio configuration object. +// It uses following parameters: +// - "namespace": namespace where configuration is stored +// - "objectType": type of the configuration +// - "objectSubtype": subtype of the configuration, used when objectType == "adapters" or "templates", empty/not used otherwise +// - "object": name of the configuration +func (in *IstioConfigService) GetIstioConfigDetails(namespace, objectType, objectSubtype, object string) (models.IstioConfigDetails, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "IstioConfigService", "GetIstioConfigDetails") + defer promtimer.ObserveNow(&err) + + istioConfigDetail := models.IstioConfigDetails{} + istioConfigDetail.Namespace = models.Namespace{Name: namespace} + istioConfigDetail.ObjectType = objectType + var gw, vs, dr, se, qs, qb, r, a, t, pc, mp, rc, sr, srb kubernetes.IstioObject + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer wg.Done() + canCreate, canUpdate, canDelete := getPermissions(in.k8s, namespace, objectType, objectSubtype) + istioConfigDetail.Permissions = models.ResourcePermissions{ + Create: canCreate, + Update: canUpdate, + Delete: canDelete, + } + }() + + switch objectType { + case Gateways: + if gw, err = in.k8s.GetGateway(namespace, object); err == nil { + istioConfigDetail.Gateway = &models.Gateway{} + istioConfigDetail.Gateway.Parse(gw) + } + case VirtualServices: + if vs, err = in.k8s.GetVirtualService(namespace, object); err == nil { + istioConfigDetail.VirtualService = &models.VirtualService{} + istioConfigDetail.VirtualService.Parse(vs) + } + case DestinationRules: + if dr, err = in.k8s.GetDestinationRule(namespace, object); err == nil { + istioConfigDetail.DestinationRule = &models.DestinationRule{} + istioConfigDetail.DestinationRule.Parse(dr) + } + case ServiceEntries: + if se, err = in.k8s.GetServiceEntry(namespace, object); err == nil { + istioConfigDetail.ServiceEntry = &models.ServiceEntry{} + istioConfigDetail.ServiceEntry.Parse(se) + } + case Rules: + if r, err = in.k8s.GetIstioRule(namespace, object); err == nil { + istioRule := models.CastIstioRule(r) + istioConfigDetail.Rule = &istioRule + } + case Adapters: + if a, err = in.k8s.GetAdapter(namespace, 
objectSubtype, object); err == nil { + adapter := models.CastIstioAdapter(a) + istioConfigDetail.Adapter = &adapter + } + case Templates: + if t, err = in.k8s.GetTemplate(namespace, objectSubtype, object); err == nil { + template := models.CastIstioTemplate(t) + istioConfigDetail.Template = &template + } + case QuotaSpecs: + if qs, err = in.k8s.GetQuotaSpec(namespace, object); err == nil { + istioConfigDetail.QuotaSpec = &models.QuotaSpec{} + istioConfigDetail.QuotaSpec.Parse(qs) + } + case QuotaSpecBindings: + if qb, err = in.k8s.GetQuotaSpecBinding(namespace, object); err == nil { + istioConfigDetail.QuotaSpecBinding = &models.QuotaSpecBinding{} + istioConfigDetail.QuotaSpecBinding.Parse(qb) + } + case Policies: + if pc, err = in.k8s.GetPolicy(namespace, object); err == nil { + istioConfigDetail.Policy = &models.Policy{} + istioConfigDetail.Policy.Parse(pc) + } + case MeshPolicies: + if mp, err = in.k8s.GetMeshPolicy(namespace, object); err == nil { + istioConfigDetail.MeshPolicy = &models.MeshPolicy{} + istioConfigDetail.MeshPolicy.Parse(mp) + } + case ClusterRbacConfigs: + if rc, err = in.k8s.GetClusterRbacConfig(namespace, object); err == nil { + istioConfigDetail.ClusterRbacConfig = &models.ClusterRbacConfig{} + istioConfigDetail.ClusterRbacConfig.Parse(rc) + } + case ServiceRoles: + if sr, err = in.k8s.GetServiceRole(namespace, object); err == nil { + istioConfigDetail.ServiceRole = &models.ServiceRole{} + istioConfigDetail.ServiceRole.Parse(sr) + } + case ServiceRoleBindings: + if srb, err = in.k8s.GetServiceRoleBinding(namespace, object); err == nil { + istioConfigDetail.ServiceRoleBinding = &models.ServiceRoleBinding{} + istioConfigDetail.ServiceRoleBinding.Parse(srb) + } + default: + err = fmt.Errorf("Object type not found: %v", objectType) + } + + wg.Wait() + + return istioConfigDetail, err +} + +// GetIstioAPI provides the Kubernetes API that manages this Istio resource type +// or empty string if it's not managed +func GetIstioAPI(resourceType string) string { + return resourceTypesToAPI[resourceType] +} + +// ParseJsonForCreate checks if a json is well formed according resourceType/subresourceType. +// It returns a json validated to be used in the Create operation, or an error to report in the handler layer. 
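// Illustrative example (not part of the vendored kiali source): ParseJsonForCreate below
// first unmarshals the body into the matching Kiali model purely as a validation step,
// then splices "kind" and "apiVersion" in front of the original JSON by string
// concatenation, dropping the body's leading "{". Assuming kubernetes.PluralType maps
// "gateways" to "Gateway" and the networking API version is "networking.istio.io/v1alpha3"
// (both values live in the vendored kubernetes package and are assumptions here), a body of
//
//	{"metadata":{"name":"my-gateway"},"spec":{"selector":{"istio":"ingressgateway"}}}
//
// submitted with resourceType "gateways" is returned (modulo the embedded newlines the
// code adds) as
//
//	{"kind": "Gateway","apiVersion": "networking.istio.io/v1alpha3","metadata":{"name":"my-gateway"},"spec":{"selector":{"istio":"ingressgateway"}}}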
+func (in *IstioConfigService) ParseJsonForCreate(resourceType, subresourceType string, body []byte) (string, error) { + var err error + istioConfigDetail := models.IstioConfigDetails{} + apiVersion := apiToVersion[resourceTypesToAPI[resourceType]] + var kind string + var marshalled string + if resourceType == Adapters || resourceType == Templates { + kind = kubernetes.PluralType[subresourceType] + } else { + kind = kubernetes.PluralType[resourceType] + } + switch resourceType { + case Gateways: + istioConfigDetail.Gateway = &models.Gateway{} + err = json.Unmarshal(body, istioConfigDetail.Gateway) + case VirtualServices: + istioConfigDetail.VirtualService = &models.VirtualService{} + err = json.Unmarshal(body, istioConfigDetail.VirtualService) + case DestinationRules: + istioConfigDetail.DestinationRule = &models.DestinationRule{} + err = json.Unmarshal(body, istioConfigDetail.DestinationRule) + case ServiceEntries: + istioConfigDetail.ServiceEntry = &models.ServiceEntry{} + err = json.Unmarshal(body, istioConfigDetail.ServiceEntry) + case Rules: + istioConfigDetail.Rule = &models.IstioRule{} + err = json.Unmarshal(body, istioConfigDetail.Rule) + case Adapters: + istioConfigDetail.Adapter = &models.IstioAdapter{} + err = json.Unmarshal(body, istioConfigDetail.Adapter) + case Templates: + istioConfigDetail.Template = &models.IstioTemplate{} + err = json.Unmarshal(body, istioConfigDetail.Template) + case QuotaSpecs: + istioConfigDetail.QuotaSpec = &models.QuotaSpec{} + err = json.Unmarshal(body, istioConfigDetail.QuotaSpec) + case QuotaSpecBindings: + istioConfigDetail.QuotaSpecBinding = &models.QuotaSpecBinding{} + err = json.Unmarshal(body, istioConfigDetail.QuotaSpecBinding) + case Policies: + istioConfigDetail.Policy = &models.Policy{} + err = json.Unmarshal(body, istioConfigDetail.Policy) + case MeshPolicies: + istioConfigDetail.MeshPolicy = &models.MeshPolicy{} + err = json.Unmarshal(body, istioConfigDetail.MeshPolicy) + default: + err = fmt.Errorf("Object type not found: %v", resourceType) + } + if err != nil { + return "", err + } + // Append apiVersion and kind + marshalled = string(body) + marshalled = strings.TrimSpace(marshalled) + marshalled = "" + + "{\n" + + "\"kind\": \"" + kind + "\",\n" + + "\"apiVersion\": \"" + apiVersion + "\"," + + marshalled[1:] + + return marshalled, nil +} + +// DeleteIstioConfigDetail deletes the given Istio resource +func (in *IstioConfigService) DeleteIstioConfigDetail(api, namespace, resourceType, resourceSubtype, name string) (err error) { + promtimer := internalmetrics.GetGoFunctionMetric("business", "IstioConfigService", "DeleteIstioConfigDetail") + defer promtimer.ObserveNow(&err) + + if resourceType == Adapters || resourceType == Templates { + err = in.k8s.DeleteIstioObject(api, namespace, resourceSubtype, name) + } else { + err = in.k8s.DeleteIstioObject(api, namespace, resourceType, name) + } + return err +} + +func (in *IstioConfigService) UpdateIstioConfigDetail(api, namespace, resourceType, resourceSubtype, name, jsonPatch string) (models.IstioConfigDetails, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "IstioConfigService", "UpdateIstioConfigDetail") + defer promtimer.ObserveNow(&err) + + return in.modifyIstioConfigDetail(api, namespace, resourceType, resourceSubtype, name, jsonPatch, false) +} + +func (in *IstioConfigService) modifyIstioConfigDetail(api, namespace, resourceType, resourceSubtype, name, json string, create bool) (models.IstioConfigDetails, error) { + var err error + updatedType 
:= resourceType + if resourceType == Adapters || resourceType == Templates { + updatedType = resourceSubtype + } + + var result kubernetes.IstioObject + istioConfigDetail := models.IstioConfigDetails{} + istioConfigDetail.Namespace = models.Namespace{Name: namespace} + istioConfigDetail.ObjectType = resourceType + + if create { + // Create new object + result, err = in.k8s.CreateIstioObject(api, namespace, updatedType, json) + } else { + // Update/Path existing object + result, err = in.k8s.UpdateIstioObject(api, namespace, updatedType, name, json) + } + if err != nil { + return istioConfigDetail, err + } + + switch resourceType { + case Gateways: + istioConfigDetail.Gateway = &models.Gateway{} + istioConfigDetail.Gateway.Parse(result) + case VirtualServices: + istioConfigDetail.VirtualService = &models.VirtualService{} + istioConfigDetail.VirtualService.Parse(result) + case DestinationRules: + istioConfigDetail.DestinationRule = &models.DestinationRule{} + istioConfigDetail.DestinationRule.Parse(result) + case ServiceEntries: + istioConfigDetail.ServiceEntry = &models.ServiceEntry{} + istioConfigDetail.ServiceEntry.Parse(result) + case Rules: + istioRule := models.CastIstioRule(result) + istioConfigDetail.Rule = &istioRule + case Adapters: + adapter := models.CastIstioAdapter(result) + istioConfigDetail.Adapter = &adapter + case Templates: + template := models.CastIstioTemplate(result) + istioConfigDetail.Template = &template + case QuotaSpecs: + istioConfigDetail.QuotaSpec = &models.QuotaSpec{} + istioConfigDetail.QuotaSpec.Parse(result) + case QuotaSpecBindings: + istioConfigDetail.QuotaSpecBinding = &models.QuotaSpecBinding{} + istioConfigDetail.QuotaSpecBinding.Parse(result) + case Policies: + istioConfigDetail.Policy = &models.Policy{} + istioConfigDetail.Policy.Parse(result) + case MeshPolicies: + istioConfigDetail.MeshPolicy = &models.MeshPolicy{} + istioConfigDetail.MeshPolicy.Parse(result) + case ClusterRbacConfigs: + istioConfigDetail.ClusterRbacConfig = &models.ClusterRbacConfig{} + istioConfigDetail.ClusterRbacConfig.Parse(result) + case ServiceRoles: + istioConfigDetail.ServiceRole = &models.ServiceRole{} + istioConfigDetail.ServiceRole.Parse(result) + case ServiceRoleBindings: + istioConfigDetail.ServiceRoleBinding = &models.ServiceRoleBinding{} + istioConfigDetail.ServiceRoleBinding.Parse(result) + default: + err = fmt.Errorf("Object type not found: %v", resourceType) + } + return istioConfigDetail, err + +} + +func (in *IstioConfigService) CreateIstioConfigDetail(api, namespace, resourceType, resourceSubtype string, body []byte) (models.IstioConfigDetails, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "IstioConfigService", "CreateIstioConfigDetail") + defer promtimer.ObserveNow(&err) + + json, err := in.ParseJsonForCreate(resourceType, resourceSubtype, body) + if err != nil { + return models.IstioConfigDetails{}, errors2.NewBadRequest(err.Error()) + } + return in.modifyIstioConfigDetail(api, namespace, resourceType, resourceSubtype, "", json, true) +} + +func getPermissions(k8s kubernetes.IstioClientInterface, namespace, objectType, objectSubtype string) (bool, bool, bool) { + var canCreate, canPatch, canUpdate, canDelete bool + if api, ok := resourceTypesToAPI[objectType]; ok { + // objectType will always match the api used in adapters/templates + // but if objectSubtype is present it should be used as resourceType + resourceType := objectType + if objectSubtype != "" { + resourceType = objectSubtype + } + ssars, permErr := 
k8s.GetSelfSubjectAccessReview(namespace, api, resourceType, []string{"create", "patch", "update", "delete"}) + if permErr == nil { + for _, ssar := range ssars { + if ssar.Spec.ResourceAttributes != nil { + switch ssar.Spec.ResourceAttributes.Verb { + case "create": + canCreate = ssar.Status.Allowed + case "patch": + canPatch = ssar.Status.Allowed + case "update": + canUpdate = ssar.Status.Allowed + case "delete": + canDelete = ssar.Status.Allowed + } + } + } + } else { + log.Errorf("Error getting permissions [namespace: %s, api: %s, resourceType: %s]: %v", namespace, api, resourceType, permErr) + } + } + return canCreate, (canUpdate || canPatch), canDelete +} + +func (in *IstioConfigService) MeshWidemTLSStatus(namespaces []string) (string, error) { + mpp, mpErr := in.hasMeshPolicyEnabled(namespaces) + if mpErr != nil { + return "", mpErr + } + + drp, drErr := in.hasDestinationRuleEnabled(namespaces) + if drErr != nil { + return "", drErr + } + + if drp && mpp { + return MeshmTLSEnabled, nil + } else if drp || mpp { + return MeshmTLSPartiallyEnabled, nil + } + + return MeshmTLSNotEnabled, nil +} + +func (in *IstioConfigService) hasMeshPolicyEnabled(namespaces []string) (bool, error) { + if len(namespaces) < 1 { + return false, fmt.Errorf("Can't find MeshPolicies without a namespace") + } + + // MeshPolicies are not namespaced. So any namespace user has access to + // will work to retrieve all the MeshPolicies. + mps, err := in.k8s.GetMeshPolicies(namespaces[0]) + if err != nil { + return false, err + } + + for _, mp := range mps { + + // It is mandatory to have default as a name + if meshMeta := mp.GetObjectMeta(); meshMeta.Name != "default" { + continue + } + + // It is no globally enabled when has targets + targets, targetPresent := mp.GetSpec()["targets"] + specificTarget := targetPresent && len(targets.([]interface{})) > 0 + if specificTarget { + continue + } + + // It is globally enabled when a peer has mtls enabled + peers, peersPresent := mp.GetSpec()["peers"] + if !peersPresent { + continue + } + + for _, peer := range peers.([]interface{}) { + peerMap := peer.(map[string]interface{}) + if mtls, present := peerMap["mtls"]; present { + if mtlsMap, ok := mtls.(map[string]interface{}); ok { + // mTLS enabled in case there is an empty map or mode is STRICT + if mode, found := mtlsMap["mode"]; !found || mode == "STRICT" { + return true, nil + } + } else { + // mTLS enabled in case mtls object is empty + return true, nil + } + } + } + } + + return false, nil +} + +func (in *IstioConfigService) hasDestinationRuleEnabled(namespaces []string) (bool, error) { + drs, err := in.getAllDestinationRules(namespaces) + if err != nil { + return false, err + } + + mtlsEnabled := false + + for _, dr := range drs { + // Following the suggested procedure to enable mesh-wide mTLS, host might be '*.local': + // https://istio.io/docs/tasks/security/authn-policy/#globally-enabling-istio-mutual-tls + host, hostPresent := dr.GetSpec()["host"] + if !hostPresent || host != "*.local" { + continue + } + + if trafficPolicy, trafficPresent := dr.GetSpec()["trafficPolicy"]; trafficPresent { + if trafficCasted, ok := trafficPolicy.(map[string]interface{}); ok { + if tls, found := trafficCasted["tls"]; found { + if tlsCasted, ok := tls.(map[string]interface{}); ok { + if mode, found := tlsCasted["mode"]; found { + if modeCasted, ok := mode.(string); ok { + if modeCasted == "ISTIO_MUTUAL" { + mtlsEnabled = true + break + } + } + } + } + } + } + } + } + + return mtlsEnabled, nil +} + +func (in *IstioConfigService) 
getAllDestinationRules(namespaces []string) ([]kubernetes.IstioObject, error) { + drChan := make(chan []kubernetes.IstioObject, len(namespaces)) + errChan := make(chan error, 1) + wg := sync.WaitGroup{} + + wg.Add(len(namespaces)) + + for _, namespace := range namespaces { + go func(ns string) { + defer wg.Done() + + drs, err := in.k8s.GetDestinationRules(ns, "") + if err != nil { + errChan <- err + return + } + + drChan <- drs + }(namespace) + } + + wg.Wait() + close(errChan) + close(drChan) + + for err := range errChan { + if err != nil { + return nil, err + } + } + + allDestinationRules := make([]kubernetes.IstioObject, 0) + for drs := range drChan { + allDestinationRules = append(allDestinationRules, drs...) + } + + return allDestinationRules, nil +} diff --git a/vendor/github.com/kiali/kiali/business/istio_validations.go b/vendor/github.com/kiali/kiali/business/istio_validations.go new file mode 100644 index 000000000..8c8cdc0c0 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/istio_validations.go @@ -0,0 +1,282 @@ +package business + +import ( + "fmt" + "sync" + + "github.com/kiali/kiali/business/checkers" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus/internalmetrics" + + v1 "k8s.io/api/core/v1" +) + +type IstioValidationsService struct { + k8s kubernetes.IstioClientInterface + businessLayer *Layer +} + +type ObjectChecker interface { + Check() models.IstioValidations +} + +// GetValidations returns an IstioValidations object with all the checks found when running +// all the enabled checkers. If service is "" then the whole namespace is validated. +func (in *IstioValidationsService) GetValidations(namespace, service string) (models.IstioValidations, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "IstioValidationsService", "GetValidations") + defer promtimer.ObserveNow(&err) + + // Ensure the service or namespace exists.. do we need to block with this? 
+ if service != "" { + if _, err := in.k8s.GetService(namespace, service); err != nil { + return nil, err + } + } else { + if _, err := in.k8s.GetNamespace(namespace); err != nil { + return nil, err + } + } + + wg := sync.WaitGroup{} + errChan := make(chan error, 1) + + var istioDetails kubernetes.IstioDetails + var services []v1.Service + var workloads models.WorkloadList + var gatewaysPerNamespace [][]kubernetes.IstioObject + var mtlsDetails kubernetes.MTLSDetails + + wg.Add(5) // We need to add these here to make sure we don't execute wg.Wait() before scheduler has started goroutines + + // NoServiceChecker is not necessary if we target a single service - those components with validation errors won't show up in the query + go in.fetchServices(&services, namespace, service, errChan, &wg) + + // We fetch without target service as some validations will require full-namespace details + go in.fetchDetails(&istioDetails, namespace, errChan, &wg) + go in.fetchWorkloads(&workloads, namespace, errChan, &wg) + go in.fetchGatewaysPerNamespace(&gatewaysPerNamespace, errChan, &wg) + go in.fetchNonLocalmTLSConfigs(&mtlsDetails, errChan, &wg) + + wg.Wait() + close(errChan) + for e := range errChan { + if e != nil { // Check that default value wasn't returned + return nil, err + } + } + + objectCheckers := in.getAllObjectCheckers(namespace, istioDetails, services, workloads, gatewaysPerNamespace, mtlsDetails) + + // Get group validations for same kind istio objects + return runObjectCheckers(objectCheckers), nil +} + +func (in *IstioValidationsService) getAllObjectCheckers(namespace string, istioDetails kubernetes.IstioDetails, services []v1.Service, workloads models.WorkloadList, gatewaysPerNamespace [][]kubernetes.IstioObject, mtlsDetails kubernetes.MTLSDetails) []ObjectChecker { + return []ObjectChecker{ + checkers.VirtualServiceChecker{Namespace: namespace, DestinationRules: istioDetails.DestinationRules, VirtualServices: istioDetails.VirtualServices}, + checkers.NoServiceChecker{Namespace: namespace, IstioDetails: &istioDetails, Services: services, WorkloadList: workloads, GatewaysPerNamespace: gatewaysPerNamespace}, + checkers.DestinationRulesChecker{DestinationRules: istioDetails.DestinationRules, MTLSDetails: mtlsDetails}, + checkers.GatewayChecker{GatewaysPerNamespace: gatewaysPerNamespace, Namespace: namespace}, + } +} + +func (in *IstioValidationsService) GetIstioObjectValidations(namespace string, objectType string, object string) (models.IstioValidations, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "IstioValidationsService", "GetIstioObjectValidations") + defer promtimer.ObserveNow(&err) + + var istioDetails kubernetes.IstioDetails + var services []v1.Service + var workloads models.WorkloadList + var gatewaysPerNamespace [][]kubernetes.IstioObject + var mtlsDetails kubernetes.MTLSDetails + + var objectCheckers []ObjectChecker + + wg := sync.WaitGroup{} + errChan := make(chan error, 1) + + // Get all the Istio objects from a Namespace and all gateways from every namespace + wg.Add(5) + go in.fetchDetails(&istioDetails, namespace, errChan, &wg) + go in.fetchServices(&services, namespace, "", errChan, &wg) + go in.fetchWorkloads(&workloads, namespace, errChan, &wg) + go in.fetchGatewaysPerNamespace(&gatewaysPerNamespace, errChan, &wg) + go in.fetchNonLocalmTLSConfigs(&mtlsDetails, errChan, &wg) + wg.Wait() + + switch objectType { + case Gateways: + objectCheckers = []ObjectChecker{ + checkers.GatewayChecker{GatewaysPerNamespace: 
gatewaysPerNamespace, Namespace: namespace}, + } + case VirtualServices: + virtualServiceChecker := checkers.VirtualServiceChecker{Namespace: namespace, VirtualServices: istioDetails.VirtualServices, DestinationRules: istioDetails.DestinationRules} + noServiceChecker := checkers.NoServiceChecker{Namespace: namespace, Services: services, IstioDetails: &istioDetails, WorkloadList: workloads, GatewaysPerNamespace: gatewaysPerNamespace} + objectCheckers = []ObjectChecker{noServiceChecker, virtualServiceChecker} + case DestinationRules: + destinationRulesChecker := checkers.DestinationRulesChecker{DestinationRules: istioDetails.DestinationRules, MTLSDetails: mtlsDetails} + noServiceChecker := checkers.NoServiceChecker{Namespace: namespace, Services: services, IstioDetails: &istioDetails, WorkloadList: workloads, GatewaysPerNamespace: gatewaysPerNamespace} + objectCheckers = []ObjectChecker{noServiceChecker, destinationRulesChecker} + case ServiceEntries: + // Validations on ServiceEntries are not yet in place + case Rules: + // Validations on Istio Rules are not yet in place + case Templates: + // Validations on Templates are not yet in place + // TODO Support subtypes + case Adapters: + // Validations on Adapters are not yet in place + // TODO Support subtypes + case QuotaSpecs: + // Validations on QuotaSpecs are not yet in place + case QuotaSpecBindings: + // Validations on QuotaSpecBindings are not yet in place + case Policies: + // Validations on Policies are not yet in place + case MeshPolicies: + // Validations on MeshPolicies are not yet in place + case ClusterRbacConfigs: + // Validations on ClusterRbacConfigs are not yet in place + case ServiceRoles: + // Validations on ServiceRoles are not yet in place + case ServiceRoleBindings: + // Validations on ServiceRoleBindings are not yet in place + default: + err = fmt.Errorf("Object type not found: %v", objectType) + } + + close(errChan) + for e := range errChan { + if e != nil { // Check that default value wasn't returned + return nil, e + } + } + + if objectCheckers == nil { + return models.IstioValidations{}, err + } + + return runObjectCheckers(objectCheckers).FilterByKey(models.ObjectTypeSingular[objectType], object), nil +} + +func runObjectCheckers(objectCheckers []ObjectChecker) models.IstioValidations { + objectTypeValidations := models.IstioValidations{} + + // Run checks for each IstioObject type + for _, objectChecker := range objectCheckers { + objectTypeValidations.MergeValidations(objectChecker.Check()) + } + + return objectTypeValidations +} + +// The following idea is used underneath: if errChan has at least one record, we'll effectively cancel the request (if scheduled in such order). On the other hand, if we can't +// write to the buffered errChan, we just ignore the error as select does not block even if channel is full. This is because a single error is enough to cancel the whole request.
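As background for the fetch helpers that follow, here is a minimal, self-contained sketch of the buffered error-channel pattern the comment above describes (illustrative only, not part of the vendored patch; all names are made up): each worker skips its fetch once an error is already queued, reports its own failure with a non-blocking send so only the first error is kept, and the caller drains the channel after wg.Wait().

package main

import (
	"fmt"
	"sync"
)

// fetch stands in for one of the fetchXxx helpers: it skips work if an error
// is already queued and reports its own failure without ever blocking.
func fetch(id int, fail bool, errChan chan error, wg *sync.WaitGroup) {
	defer wg.Done()
	if len(errChan) > 0 {
		return // an earlier error effectively cancels this fetch
	}
	if fail {
		select {
		case errChan <- fmt.Errorf("fetch %d failed", id): // first error wins
		default: // buffer already full; drop it, one error is enough
		}
	}
}

func main() {
	wg := sync.WaitGroup{}
	errChan := make(chan error, 1) // buffered so senders never block

	wg.Add(3)
	go fetch(1, false, errChan, &wg)
	go fetch(2, true, errChan, &wg)
	go fetch(3, true, errChan, &wg)

	wg.Wait()
	close(errChan)
	for e := range errChan { // at most one error survives
		fmt.Println("aborting request:", e)
	}
}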
+ +func (in *IstioValidationsService) fetchGatewaysPerNamespace(gatewaysPerNamespace *[][]kubernetes.IstioObject, errChan chan error, wg *sync.WaitGroup) { + defer wg.Done() + if nss, err := in.businessLayer.Namespace.GetNamespaces(); err == nil { + gwss := make([][]kubernetes.IstioObject, len(nss)) + for i := range nss { + gwss[i] = make([]kubernetes.IstioObject, 0) + } + *gatewaysPerNamespace = gwss + + wg.Add(len(nss)) + for i, ns := range nss { + go fetchNoEntry(&gwss[i], ns.Name, in.k8s.GetGateways, wg, errChan) + } + } else { + select { + case errChan <- err: + default: + } + } +} + +func fetchNoEntry(rValue *[]kubernetes.IstioObject, namespace string, fetcher func(string) ([]kubernetes.IstioObject, error), wg *sync.WaitGroup, errChan chan error) { + defer wg.Done() + if len(errChan) == 0 { + fetched, err := fetcher(namespace) + *rValue = append(*rValue, fetched...) + if err != nil { + select { + case errChan <- err: + default: + } + } + } +} + +func (in *IstioValidationsService) fetchServices(rValue *[]v1.Service, namespace, serviceName string, errChan chan error, wg *sync.WaitGroup) { + defer wg.Done() + if len(errChan) == 0 { + services, err := in.k8s.GetServices(namespace, nil) + if err != nil { + select { + case errChan <- err: + default: + } + } else { + *rValue = services + } + } +} + +func (in *IstioValidationsService) fetchWorkloads(rValue *models.WorkloadList, namespace string, errChan chan error, wg *sync.WaitGroup) { + defer wg.Done() + if len(errChan) == 0 { + workloadList, err := in.businessLayer.Workload.GetWorkloadList(namespace) + if err != nil { + select { + case errChan <- err: + default: + } + } else { + *rValue = workloadList + } + } +} + +func (in *IstioValidationsService) fetchDetails(rValue *kubernetes.IstioDetails, namespace string, errChan chan error, wg *sync.WaitGroup) { + defer wg.Done() + if len(errChan) == 0 { + istioDetails, err := in.k8s.GetIstioDetails(namespace, "") + if err != nil { + select { + case errChan <- err: + default: + } + } else { + *rValue = *istioDetails + } + } +} + +func (in *IstioValidationsService) fetchNonLocalmTLSConfigs(mtlsDetails *kubernetes.MTLSDetails, errChan chan error, wg *sync.WaitGroup) { + defer wg.Done() + if len(errChan) > 0 { + return + } + + namespaces, err := in.businessLayer.Namespace.GetNamespaces() + if err != nil { + errChan <- err + return + } + + nsNames := make([]string, 0, len(namespaces)) + for _, ns := range namespaces { + nsNames = append(nsNames, ns.Name) + } + + destinationRules, err := in.businessLayer.IstioConfig.getAllDestinationRules(nsNames) + if err != nil { + errChan <- err + } else { + mtlsDetails.DestinationRules = destinationRules + } +} diff --git a/vendor/github.com/kiali/kiali/business/jaeger_helper.go b/vendor/github.com/kiali/kiali/business/jaeger_helper.go new file mode 100644 index 000000000..1aa2b87d3 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/jaeger_helper.go @@ -0,0 +1,119 @@ +package business + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/kiali/kiali/log" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/kiali/kiali/config" +) + +type Trace struct { + Id string `json:"traceID"` +} + +type RequestTrace struct { + Traces []Trace `json:"data"` +} + +type JaegerServices struct { + Services []string `json:"data"` +} + +var ( + JaegerAvailable = true +) + +func getErrorTracesFromJaeger(namespace string, service string) (errorTraces int, err error) { + errorTraces = 0 + err = nil + if !JaegerAvailable { + return -1, errors.New("Jaeger is not 
available") + } + if config.Get().ExternalServices.Jaeger.Service != "" { + u, errParse := url.Parse(fmt.Sprintf("http://%s/api/traces", config.Get().ExternalServices.Jaeger.Service)) + if errParse != nil { + log.Errorf("Error parse Jaeger URL fetching Error Traces: %s", err) + err = errParse + } else { + q := u.Query() + q.Set("lookback", "1h") + q.Set("service", fmt.Sprintf("%s.%s", service, namespace)) + t := time.Now().UnixNano() / 1000 + q.Set("start", fmt.Sprintf("%d", t-60*60*1000*1000)) + q.Set("end", fmt.Sprintf("%d", t)) + q.Set("tags", "{\"error\":\"true\"}") + u.RawQuery = q.Encode() + timeout := time.Duration(1000 * time.Millisecond) + client := http.Client{ + Timeout: timeout, + } + resp, reqError := client.Get(u.String()) + if reqError != nil { + err = reqError + } else { + defer resp.Body.Close() + body, errRead := ioutil.ReadAll(resp.Body) + if errRead != nil { + log.Errorf("Error Reading Jaeger Response fetching Error Traces: %s", errRead) + err = errRead + return -1, err + } + var traces RequestTrace + if errMarshal := json.Unmarshal([]byte(body), &traces); errMarshal != nil { + log.Errorf("Error Unmarshal Jaeger Response fetching Error Traces: %s", errRead) + err = errMarshal + return -1, err + } + errorTraces = len(traces.Traces) + } + } + } + return errorTraces, err +} + +func GetServices() (services JaegerServices, err error) { + services = JaegerServices{Services: []string{}} + err = nil + u, err := url.Parse(fmt.Sprintf("http://%s/api/services", config.Get().ExternalServices.Jaeger.Service)) + if err != nil { + log.Errorf("Error parse Jaeger URL fetching Services: %s", err) + return services, err + } + timeout := time.Duration(1000 * time.Millisecond) + client := http.Client{ + Timeout: timeout, + } + resp, reqError := client.Get(u.String()) + if reqError != nil { + err = reqError + } else { + defer resp.Body.Close() + body, errRead := ioutil.ReadAll(resp.Body) + if errRead != nil { + log.Errorf("Error Reading Jaeger Response fetching Services: %s", errRead) + err = errRead + return services, err + } + if errMarshal := json.Unmarshal([]byte(body), &services); errMarshal != nil { + log.Errorf("Error Unmarshal Jaeger Response fetching Services: %s", errRead) + err = errMarshal + return services, err + } + } + return services, err +} + +func contains(a []string, x string) bool { + for _, n := range a { + if x == n { + return true + } + } + return false +} diff --git a/vendor/github.com/kiali/kiali/business/layer.go b/vendor/github.com/kiali/kiali/business/layer.go new file mode 100644 index 000000000..a81ff909e --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/layer.go @@ -0,0 +1,66 @@ +package business + +import ( + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/prometheus" +) + +// Layer is a container for fast access to inner services +type Layer struct { + Svc SvcService + Health HealthService + Validations IstioValidationsService + IstioConfig IstioConfigService + Workload WorkloadService + App AppService + Namespace NamespaceService + k8s kubernetes.IstioClientInterface +} + +// Global business.Layer; currently only used for tests to inject mocks, +// whereas production code recreates services in a stateless way +var layer *Layer + +// Get the business.Layer, create it if necessary +func Get() (*Layer, error) { + if layer != nil { + return layer, nil + } + k8s, err := kubernetes.NewClient() + if err != nil { + return nil, err + } + prom, err := prometheus.NewClient() + if err != nil { + return nil, err + } + // Business needs to maintain 
a minimal state as kubernetes package will maintain a cache + SetWithBackends(k8s, prom) + return layer, nil +} + +// SetWithBackends creates all services with injected clients to external APIs +func SetWithBackends(k8s kubernetes.IstioClientInterface, prom prometheus.ClientInterface) *Layer { + layer = NewWithBackends(k8s, prom) + return layer +} + +// NewWithBackends creates the business layer using the passed k8s and prom clients +func NewWithBackends(k8s kubernetes.IstioClientInterface, prom prometheus.ClientInterface) *Layer { + temporaryLayer := &Layer{} + temporaryLayer.Health = HealthService{prom: prom, k8s: k8s} + temporaryLayer.Svc = SvcService{prom: prom, k8s: k8s, businessLayer: temporaryLayer} + temporaryLayer.IstioConfig = IstioConfigService{k8s: k8s} + temporaryLayer.Workload = WorkloadService{k8s: k8s, prom: prom, businessLayer: temporaryLayer} + temporaryLayer.Validations = IstioValidationsService{k8s: k8s, businessLayer: temporaryLayer} + temporaryLayer.App = AppService{prom: prom, k8s: k8s} + temporaryLayer.Namespace = NewNamespaceService(k8s) + temporaryLayer.k8s = k8s + return temporaryLayer +} + +func (in *Layer) Stop() { + if in.k8s != nil { + in.k8s.Stop() + } +} diff --git a/vendor/github.com/kiali/kiali/business/namespaces.go b/vendor/github.com/kiali/kiali/business/namespaces.go new file mode 100644 index 000000000..345114683 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/namespaces.go @@ -0,0 +1,101 @@ +package business + +import ( + "regexp" + + osv1 "github.com/openshift/api/project/v1" + kv1 "k8s.io/api/core/v1" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus/internalmetrics" +) + +// Namespace deals with fetching k8s namespaces / OpenShift projects and convert to kiali model +type NamespaceService struct { + k8s kubernetes.IstioClientInterface + hasProjects bool +} + +func NewNamespaceService(k8s kubernetes.IstioClientInterface) NamespaceService { + + var hasProjects bool + + if k8s != nil && k8s.IsOpenShift() { + hasProjects = true + } else { + hasProjects = false + } + + return NamespaceService{ + k8s: k8s, + hasProjects: hasProjects, + } +} + +// Returns a list of the given namespaces / projects +func (in *NamespaceService) GetNamespaces() ([]models.Namespace, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "NamespaceService", "GetNamespaces") + defer promtimer.ObserveNow(&err) + + namespaces := []models.Namespace{} + // If we are running in OpenShift, we will use the project names since these are the list of accessible namespaces + if in.hasProjects { + projects, err2 := in.k8s.GetProjects() + if err2 == nil { + // Everything is good, return the projects we got from OpenShift / kube-project + namespaces = models.CastProjectCollection(projects) + } + } else { + nss, err := in.k8s.GetNamespaces() + if err != nil { + return nil, err + } + + namespaces = models.CastNamespaceCollection(nss) + } + + result := namespaces + excludes := config.Get().API.Namespaces.Exclude + if len(excludes) > 0 { + result = []models.Namespace{} + NAMESPACES: + for _, namespace := range namespaces { + for _, excludePattern := range excludes { + if match, _ := regexp.MatchString(excludePattern, namespace.Name); match { + continue NAMESPACES + } + } + result = append(result, namespace) + } + } + + return result, nil +} + +// GetNamespace returns the definition of the specified namespace. 
+func (in *NamespaceService) GetNamespace(namespace string) (*models.Namespace, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "NamespaceService", "GetNamespace") + defer promtimer.ObserveNow(&err) + + if in.hasProjects { + var project *osv1.Project + project, err = in.k8s.GetProject(namespace) + if err != nil { + return nil, err + } + result := models.CastProject(*project) + return &result, nil + } else { + var ns *kv1.Namespace + ns, err = in.k8s.GetNamespace(namespace) + if err != nil { + return nil, err + } + result := models.CastNamespace(*ns) + return &result, nil + } +} diff --git a/vendor/github.com/kiali/kiali/business/services.go b/vendor/github.com/kiali/kiali/business/services.go new file mode 100644 index 000000000..97e921b6c --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/services.go @@ -0,0 +1,267 @@ +package business + +import ( + "sync" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus" + "github.com/kiali/kiali/prometheus/internalmetrics" +) + +// SvcService deals with fetching istio/kubernetes services related content and convert to kiali model +type SvcService struct { + prom prometheus.ClientInterface + k8s kubernetes.IstioClientInterface + businessLayer *Layer +} + +// GetServiceList returns a list of all services for a given Namespace +func (in *SvcService) GetServiceList(namespace string) (*models.ServiceList, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "SvcService", "GetServiceList") + defer promtimer.ObserveNow(&err) + + var svcs []v1.Service + var pods []v1.Pod + + wg := sync.WaitGroup{} + wg.Add(2) + errChan := make(chan error, 2) + + go func() { + defer wg.Done() + var err2 error + svcs, err2 = in.k8s.GetServices(namespace, nil) + if err2 != nil { + log.Errorf("Error fetching Services per namespace %s: %s", namespace, err2) + errChan <- err2 + } + }() + + go func() { + defer wg.Done() + var err2 error + pods, err2 = in.k8s.GetPods(namespace, "") + if err2 != nil { + log.Errorf("Error fetching Pods per namespace %s: %s", namespace, err2) + errChan <- err2 + } + }() + + wg.Wait() + if len(errChan) != 0 { + err = <-errChan + return nil, err + } + + // Convert to Kiali model + return in.buildServiceList(models.Namespace{Name: namespace}, svcs, pods), nil +} + +func (in *SvcService) buildServiceList(namespace models.Namespace, svcs []v1.Service, pods []v1.Pod) *models.ServiceList { + services := make([]models.ServiceOverview, len(svcs)) + conf := config.Get() + // Convert each k8s service into our model + for i, item := range svcs { + sPods := kubernetes.FilterPodsForService(&item, pods) + /** Check if Service has istioSidecar deployed */ + mPods := models.Pods{} + mPods.Parse(sPods) + hasSideCar := mPods.HasIstioSideCar() + /** Check if Service has the label app required by Istio */ + _, appLabel := item.Spec.Selector[conf.IstioLabels.AppLabelName] + services[i] = models.ServiceOverview{ + Name: item.Name, + IstioSidecar: hasSideCar, + AppLabel: appLabel, + } + } + + return &models.ServiceList{Namespace: namespace, Services: services} +} + +// GetService returns a single service and associated data using the interval and queryTime +func (in *SvcService) GetService(namespace, service, interval string, queryTime time.Time) (*models.ServiceDetails, error) { + var err error + 
promtimer := internalmetrics.GetGoFunctionMetric("business", "SvcService", "GetService") + defer promtimer.ObserveNow(&err) + + svc, eps, err := in.getServiceDefinition(namespace, service) + if err != nil { + return nil, err + } + + var pods []v1.Pod + var hth models.ServiceHealth + var vs, dr []kubernetes.IstioObject + var sWk map[string][]prometheus.Workload + var ws models.Workloads + + wg := sync.WaitGroup{} + wg.Add(9) + errChan := make(chan error, 8) + + labelsSelector := labels.Set(svc.Spec.Selector).String() + + go func() { + defer wg.Done() + var err2 error + pods, err2 = in.k8s.GetPods(namespace, labelsSelector) + if err2 != nil { + errChan <- err2 + } + }() + + go func() { + defer wg.Done() + var err2 error + hth, err2 = in.businessLayer.Health.GetServiceHealth(namespace, service, interval, queryTime) + if err2 != nil { + errChan <- err2 + } + }() + + go func() { + defer wg.Done() + var err2 error + vs, err2 = in.k8s.GetVirtualServices(namespace, service) + if err2 != nil { + errChan <- err2 + } + }() + + go func() { + defer wg.Done() + var err2 error + dr, err2 = in.k8s.GetDestinationRules(namespace, service) + if err2 != nil { + errChan <- err2 + } + }() + + go func() { + defer wg.Done() + var err2 error + ns, err2 := in.businessLayer.Namespace.GetNamespace(namespace) + if err2 != nil { + log.Errorf("Error fetching details of namespace %s: %s", namespace, err2) + errChan <- err2 + } + + sWk, err2 = in.prom.GetSourceWorkloads(ns.Name, ns.CreationTimestamp, service) + if err2 != nil { + log.Errorf("Error fetching SourceWorkloads per namespace %s and service %s: %s", namespace, service, err2) + errChan <- err2 + } + }() + + go func() { + defer wg.Done() + var err2 error + ws, err2 = fetchWorkloads(in.k8s, namespace, labelsSelector) + if err2 != nil { + log.Errorf("Error fetching Workloads per namespace %s and service %s: %s", namespace, service, err2) + errChan <- err2 + } + }() + + var vsCreate, vsUpdate, vsDelete bool + go func() { + defer wg.Done() + vsCreate, vsUpdate, vsDelete = getPermissions(in.k8s, namespace, VirtualServices, "") + }() + + var drCreate, drUpdate, drDelete bool + go func() { + defer wg.Done() + drCreate, drUpdate, drDelete = getPermissions(in.k8s, namespace, DestinationRules, "") + }() + + var eTraces int + go func() { + // Maybe a future jaeger business layer + defer wg.Done() + eTraces, err = getErrorTracesFromJaeger(namespace, service) + }() + + wg.Wait() + if len(errChan) != 0 { + err = <-errChan + return nil, err + } + + wo := models.WorkloadOverviews{} + for _, w := range ws { + wi := &models.WorkloadListItem{} + wi.ParseWorkload(w) + wo = append(wo, wi) + } + + s := models.ServiceDetails{Workloads: wo, Health: hth} + s.SetService(svc) + s.SetPods(kubernetes.FilterPodsForEndpoints(eps, pods)) + s.SetEndpoints(eps) + s.SetVirtualServices(vs, vsCreate, vsUpdate, vsDelete) + s.SetDestinationRules(dr, drCreate, drUpdate, drDelete) + s.SetSourceWorkloads(sWk) + s.SetErrorTraces(eTraces) + return &s, nil +} + +// GetServiceDefinition returns a single service definition (the service object and endpoints), no istio or runtime information +func (in *SvcService) GetServiceDefinition(namespace, service string) (*models.ServiceDetails, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "SvcService", "GetServiceDefinition") + defer promtimer.ObserveNow(&err) + + svc, eps, err := in.getServiceDefinition(namespace, service) + if err != nil { + return nil, err + } + + s := models.ServiceDetails{} + s.SetService(svc) + 
s.SetEndpoints(eps) + return &s, nil +} + +func (in *SvcService) getServiceDefinition(namespace, service string) (svc *v1.Service, eps *v1.Endpoints, err error) { + wg := sync.WaitGroup{} + wg.Add(2) + errChan := make(chan error, 2) + + go func() { + defer wg.Done() + var err2 error + svc, err2 = in.k8s.GetService(namespace, service) + if err2 != nil { + log.Errorf("Error fetching Service per namespace %s and service %s: %s", namespace, service, err2) + errChan <- err2 + } + }() + + go func() { + defer wg.Done() + var err2 error + eps, err2 = in.k8s.GetEndpoints(namespace, service) + if err2 != nil { + log.Errorf("Error fetching Endpoints per namespace %s and service %s: %s", namespace, service, err2) + errChan <- err2 + } + }() + + wg.Wait() + if len(errChan) != 0 { + err = <-errChan + return nil, nil, err + } + + return svc, eps, nil +} diff --git a/vendor/github.com/kiali/kiali/business/test_util.go b/vendor/github.com/kiali/kiali/business/test_util.go new file mode 100644 index 000000000..e51c53393 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/test_util.go @@ -0,0 +1,712 @@ +package business + +import ( + "time" + + osappsv1 "github.com/openshift/api/apps/v1" + "k8s.io/api/apps/v1beta1" + "k8s.io/api/apps/v1beta2" + "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes/kubetest" +) + +// Consolidate fake/mock data used in tests per package + +func FakeDeployments() []v1beta1.Deployment { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + return []v1beta1.Deployment{ + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "Deployment", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v1", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "httpbin"}, + }, + }, + }, + Status: v1beta1.DeploymentStatus{ + Replicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 0, + }, + }, + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "Deployment", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v2", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "httpbin", versionLabel: "v2"}, + }, + }, + }, + Status: v1beta1.DeploymentStatus{ + Replicas: 2, + AvailableReplicas: 1, + UnavailableReplicas: 1, + }, + }, + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "Deployment", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v3", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + }, + Status: v1beta1.DeploymentStatus{ + Replicas: 2, + AvailableReplicas: 0, + UnavailableReplicas: 2, + }, + }, + } +} + +func FakeDuplicatedDeployments() []v1beta1.Deployment { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + return []v1beta1.Deployment{ + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "Deployment", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "duplicated-v1", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: 
v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "duplicated", versionLabel: "v1"}, + }, + }, + }, + Status: v1beta1.DeploymentStatus{ + Replicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 0, + }, + }, + } +} + +func FakeReplicaSets() []v1beta2.ReplicaSet { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + return []v1beta2.ReplicaSet{ + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "ReplicaSet", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v1", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1beta2.ReplicaSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "httpbin"}, + }, + }, + }, + Status: v1beta2.ReplicaSetStatus{ + Replicas: 1, + AvailableReplicas: 1, + ReadyReplicas: 1, + }, + }, + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "ReplicaSet", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v2", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1beta2.ReplicaSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "httpbin", versionLabel: "v2"}, + }, + }, + }, + Status: v1beta2.ReplicaSetStatus{ + Replicas: 2, + AvailableReplicas: 1, + ReadyReplicas: 1, + }, + }, + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "ReplicaSet", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v3", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1beta2.ReplicaSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + }, + Status: v1beta2.ReplicaSetStatus{ + Replicas: 2, + AvailableReplicas: 0, + ReadyReplicas: 2, + }, + }, + } +} + +func FakeDuplicatedReplicaSets() []v1beta2.ReplicaSet { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + controller := true + return []v1beta2.ReplicaSet{ + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "ReplicaSet", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "duplicated-v1-12345", + CreationTimestamp: meta_v1.NewTime(t1), + OwnerReferences: []meta_v1.OwnerReference{meta_v1.OwnerReference{ + Controller: &controller, + Kind: "Deployment", + Name: "duplicated-v1", + }}, + }, + Spec: v1beta2.ReplicaSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "duplicated", versionLabel: "v1"}, + }, + }, + }, + Status: v1beta2.ReplicaSetStatus{ + Replicas: 1, + AvailableReplicas: 1, + ReadyReplicas: 1, + }, + }, + } +} + +func FakeReplicationControllers() []v1.ReplicationController { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + return []v1.ReplicationController{ + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "ReplicationController", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v1", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1.ReplicationControllerSpec{ + Template: &v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "httpbin"}, + }, + }, + }, + Status: v1.ReplicationControllerStatus{ + Replicas: 1, + AvailableReplicas: 1, 
+ ReadyReplicas: 1, + }, + }, + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "ReplicationController", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v2", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1.ReplicationControllerSpec{ + Template: &v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "httpbin", versionLabel: "v2"}, + }, + }, + }, + Status: v1.ReplicationControllerStatus{ + Replicas: 2, + AvailableReplicas: 1, + ReadyReplicas: 1, + }, + }, + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "ReplicationController", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v3", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1.ReplicationControllerSpec{ + Template: &v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + }, + Status: v1.ReplicationControllerStatus{ + Replicas: 2, + AvailableReplicas: 0, + ReadyReplicas: 2, + }, + }, + } +} + +func FakeDeploymentConfigs() []osappsv1.DeploymentConfig { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + return []osappsv1.DeploymentConfig{ + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "DeploymentConfig", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v1", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: osappsv1.DeploymentConfigSpec{ + Template: &v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "httpbin"}, + }, + }, + }, + Status: osappsv1.DeploymentConfigStatus{ + Replicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 0, + }, + }, + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "DeploymentConfig", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v2", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: osappsv1.DeploymentConfigSpec{ + Template: &v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "httpbin", versionLabel: "v2"}, + }, + }, + }, + Status: osappsv1.DeploymentConfigStatus{ + Replicas: 2, + AvailableReplicas: 1, + UnavailableReplicas: 1, + }, + }, + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "DeploymentConfig", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v3", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: osappsv1.DeploymentConfigSpec{ + Template: &v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + }, + Status: osappsv1.DeploymentConfigStatus{ + Replicas: 2, + AvailableReplicas: 0, + UnavailableReplicas: 2, + }, + }, + } +} + +func FakeStatefulSets() []v1beta2.StatefulSet { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + return []v1beta2.StatefulSet{ + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "StatefulSet", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v1", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1beta2.StatefulSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "httpbin"}, + }, + }, + }, + Status: v1beta2.StatefulSetStatus{ + Replicas: 1, + ReadyReplicas: 1, + }, + }, + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "StatefulSet", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v2", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1beta2.StatefulSetSpec{ + Template: 
v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "httpbin", versionLabel: "v2"}, + }, + }, + }, + Status: v1beta2.StatefulSetStatus{ + Replicas: 2, + ReadyReplicas: 1, + }, + }, + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "StatefulSet", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v3", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1beta2.StatefulSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + }, + Status: v1beta2.StatefulSetStatus{ + Replicas: 2, + ReadyReplicas: 2, + }, + }, + } +} + +func FakeDuplicatedStatefulSets() []v1beta2.StatefulSet { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + return []v1beta2.StatefulSet{ + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "StatefulSet", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "duplicated-v1", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1beta2.StatefulSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "duplicated", versionLabel: "v1"}, + }, + }, + }, + Status: v1beta2.StatefulSetStatus{ + Replicas: 1, + ReadyReplicas: 1, + }, + }, + } +} + +func FakeDepSyncedWithRS() []v1beta1.Deployment { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + return []v1beta1.Deployment{ + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "Deployment", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "details-v1", + CreationTimestamp: meta_v1.NewTime(t1), + }, + Spec: v1beta1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "details", versionLabel: "v1"}, + }, + }, + }, + Status: v1beta1.DeploymentStatus{ + Replicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 0, + }, + }, + } +} + +func FakeRSSyncedWithPods() []v1beta2.ReplicaSet { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + controller := true + return []v1beta2.ReplicaSet{ + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "ReplicaSet", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "details-v1-3618568057", + CreationTimestamp: meta_v1.NewTime(t1), + OwnerReferences: []meta_v1.OwnerReference{meta_v1.OwnerReference{ + Controller: &controller, + Kind: "Deployment", + Name: "details-v1", + }}, + }, + Spec: v1beta2.ReplicaSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: meta_v1.ObjectMeta{ + Labels: map[string]string{appLabel: "details", versionLabel: "v1"}, + }, + }, + }, + Status: v1beta2.ReplicaSetStatus{ + Replicas: 1, + AvailableReplicas: 1, + ReadyReplicas: 0, + }, + }, + } +} + +func FakePodsSyncedWithDeployments() []v1.Pod { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + controller := true + return []v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "details-v1-3618568057-dnkjp", + CreationTimestamp: meta_v1.NewTime(t1), + Labels: map[string]string{appLabel: "httpbin", versionLabel: "v1"}, + OwnerReferences: 
[]meta_v1.OwnerReference{meta_v1.OwnerReference{ + Controller: &controller, + Kind: "ReplicaSet", + Name: "details-v1-3618568057", + }}, + Annotations: kubetest.FakeIstioAnnotations(), + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{Name: "details", Image: "whatever"}, + v1.Container{Name: "istio-proxy", Image: "docker.io/istio/proxy:0.7.1"}, + }, + InitContainers: []v1.Container{ + v1.Container{Name: "istio-init", Image: "docker.io/istio/proxy_init:0.7.1"}, + v1.Container{Name: "enable-core-dump", Image: "alpine"}, + }, + }, + }, + } +} + +func FakePodsSyncedWithDuplicated() []v1.Pod { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + controller := true + return []v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "duplicated-v1-3618568057-1", + CreationTimestamp: meta_v1.NewTime(t1), + Labels: map[string]string{appLabel: "duplicated", versionLabel: "v1"}, + OwnerReferences: []meta_v1.OwnerReference{meta_v1.OwnerReference{ + Controller: &controller, + Kind: "StatefulSet", + Name: "duplicated-v1", + }}, + Annotations: kubetest.FakeIstioAnnotations(), + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{Name: "details", Image: "whatever"}, + v1.Container{Name: "istio-proxy", Image: "docker.io/istio/proxy:0.7.1"}, + }, + InitContainers: []v1.Container{ + v1.Container{Name: "istio-init", Image: "docker.io/istio/proxy_init:0.7.1"}, + v1.Container{Name: "enable-core-dump", Image: "alpine"}, + }, + }, + }, + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "duplicated-v1-3618568057-3", + CreationTimestamp: meta_v1.NewTime(t1), + Labels: map[string]string{appLabel: "duplicated", versionLabel: "v1"}, + OwnerReferences: []meta_v1.OwnerReference{meta_v1.OwnerReference{ + Controller: &controller, + Kind: "StatefulSet", + Name: "duplicated-v1", + }}, + Annotations: kubetest.FakeIstioAnnotations(), + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{Name: "details", Image: "whatever"}, + v1.Container{Name: "istio-proxy", Image: "docker.io/istio/proxy:0.7.1"}, + }, + InitContainers: []v1.Container{ + v1.Container{Name: "istio-init", Image: "docker.io/istio/proxy_init:0.7.1"}, + v1.Container{Name: "enable-core-dump", Image: "alpine"}, + }, + }, + }, + } +} + +func FakePodsNoController() []v1.Pod { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 +0300") + + return []v1.Pod{ + { + TypeMeta: meta_v1.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: meta_v1.ObjectMeta{ + Name: "orphan-pod", + CreationTimestamp: meta_v1.NewTime(t1), + Labels: map[string]string{appLabel: "httpbin", versionLabel: "v1"}, + Annotations: kubetest.FakeIstioAnnotations(), + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{Name: "details", Image: "whatever"}, + v1.Container{Name: "istio-proxy", Image: "docker.io/istio/proxy:0.7.1"}, + }, + InitContainers: []v1.Container{ + v1.Container{Name: "istio-init", Image: "docker.io/istio/proxy_init:0.7.1"}, + v1.Container{Name: "enable-core-dump", Image: "alpine"}, + }, + }, + }, + } +} + +func FakePodsFromDaemonSet() []v1.Pod { + conf := config.NewConfig() + config.Set(conf) + appLabel := conf.IstioLabels.AppLabelName + versionLabel := conf.IstioLabels.VersionLabelName + t1, _ := time.Parse(time.RFC822Z, "08 Mar 18 17:44 
+0300") + controller := true + return []v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "daemon-pod", + CreationTimestamp: meta_v1.NewTime(t1), + Labels: map[string]string{appLabel: "httpbin", versionLabel: "v1"}, + OwnerReferences: []meta_v1.OwnerReference{meta_v1.OwnerReference{ + Controller: &controller, + Kind: "DaemonSet", + Name: "daemon-controller", + }}, + Annotations: kubetest.FakeIstioAnnotations(), + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{Name: "details", Image: "whatever"}, + v1.Container{Name: "istio-proxy", Image: "docker.io/istio/proxy:0.7.1"}, + }, + InitContainers: []v1.Container{ + v1.Container{Name: "istio-init", Image: "docker.io/istio/proxy_init:0.7.1"}, + v1.Container{Name: "enable-core-dump", Image: "alpine"}, + }, + }, + }, + } +} + +func FakeServices() []v1.Service { + return []v1.Service{ + { + ObjectMeta: meta_v1.ObjectMeta{Name: "httpbin"}, + Spec: v1.ServiceSpec{ + Selector: map[string]string{"app": "httpbin"}, + }, + }, + } +} diff --git a/vendor/github.com/kiali/kiali/business/workloads.go b/vendor/github.com/kiali/kiali/business/workloads.go new file mode 100644 index 000000000..862605ec7 --- /dev/null +++ b/vendor/github.com/kiali/kiali/business/workloads.go @@ -0,0 +1,1028 @@ +package business + +import ( + "sort" + "sync" + "time" + + osappsv1 "github.com/openshift/api/apps/v1" + "k8s.io/api/apps/v1beta1" + "k8s.io/api/apps/v1beta2" + batch_v1 "k8s.io/api/batch/v1" + batch_v1beta1 "k8s.io/api/batch/v1beta1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus" + "github.com/kiali/kiali/prometheus/internalmetrics" +) + +// Workload deals with fetching istio/kubernetes workloads related content and convert to kiali model +type WorkloadService struct { + prom prometheus.ClientInterface + k8s kubernetes.IstioClientInterface + businessLayer *Layer +} + +// GetWorkloadList is the API handler to fetch the list of workloads in a given namespace. +func (in *WorkloadService) GetWorkloadList(namespace string) (models.WorkloadList, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "WorkloadService", "GetWorkloadList") + defer promtimer.ObserveNow(&err) + + workloadList := &models.WorkloadList{ + Namespace: models.Namespace{Name: namespace, CreationTimestamp: time.Time{}}, + Workloads: []models.WorkloadListItem{}, + } + ws, err := fetchWorkloads(in.k8s, namespace, "") + if err != nil { + return *workloadList, err + } + + for _, w := range ws { + wItem := &models.WorkloadListItem{} + wItem.ParseWorkload(w) + workloadList.Workloads = append(workloadList.Workloads, *wItem) + } + + return *workloadList, nil +} + +// GetWorkload is the API handler to fetch details of a specific workload. 
+// If includeServices is set true, the Workload will fetch all services related +func (in *WorkloadService) GetWorkload(namespace string, workloadName string, includeServices bool) (*models.Workload, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "WorkloadService", "GetWorkload") + defer promtimer.ObserveNow(&err) + + workload, err := fetchWorkload(in.k8s, namespace, workloadName) + if err != nil { + return nil, err + } + + if includeServices { + wg := sync.WaitGroup{} + wg.Add(1) + errChan := make(chan error, 1) + var destService []prometheus.Service + + go func() { + defer wg.Done() + var err2 error + ns, err2 := in.businessLayer.Namespace.GetNamespace(namespace) + if err2 != nil { + log.Errorf("Error fetching details of namespace %s: %s", namespace, err2) + errChan <- err2 + return + } + + destService, err2 = in.prom.GetDestinationServices(ns.Name, ns.CreationTimestamp, workloadName) + if err2 != nil { + log.Errorf("Error fetching SourceWorkloads per namespace %s and service %s: %s", namespace, workloadName, err2) + errChan <- err2 + } + }() + + services, err := in.k8s.GetServices(namespace, workload.Labels) + if err != nil { + return nil, err + } + workload.SetServices(services) + + wg.Wait() + if len(errChan) != 0 { + err = <-errChan + return nil, err + } + + workload.SetDestinationServices(destService) + } + + in.fillCustomDashboardRefs(namespace, workload) + + return workload, nil +} + +func (in *WorkloadService) GetPods(namespace string, labelSelector string) (models.Pods, error) { + var err error + promtimer := internalmetrics.GetGoFunctionMetric("business", "WorkloadService", "GetPods") + defer promtimer.ObserveNow(&err) + + ps, err := in.k8s.GetPods(namespace, labelSelector) + if err != nil { + return nil, err + } + pods := models.Pods{} + pods.Parse(ps) + return pods, nil +} + +func fetchWorkloads(k8s kubernetes.IstioClientInterface, namespace string, labelSelector string) (models.Workloads, error) { + var pods []v1.Pod + var repcon []v1.ReplicationController + var dep []v1beta1.Deployment + var repset []v1beta2.ReplicaSet + var depcon []osappsv1.DeploymentConfig + var fulset []v1beta2.StatefulSet + var jbs []batch_v1.Job + var conjbs []batch_v1beta1.CronJob + + ws := models.Workloads{} + + wg := sync.WaitGroup{} + wg.Add(8) + errChan := make(chan error, 8) + + go func() { + defer wg.Done() + var err error + pods, err = k8s.GetPods(namespace, labelSelector) + if err != nil { + log.Errorf("Error fetching Pods per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + go func() { + defer wg.Done() + var err error + dep, err = k8s.GetDeployments(namespace) + if err != nil { + log.Errorf("Error fetching Deployments per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + go func() { + defer wg.Done() + var err error + repset, err = k8s.GetReplicaSets(namespace) + if err != nil { + log.Errorf("Error fetching ReplicaSets per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + go func() { + defer wg.Done() + var err error + repcon, err = k8s.GetReplicationControllers(namespace) + if err != nil { + log.Errorf("Error fetching GetReplicationControllers per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + go func() { + defer wg.Done() + var err error + if k8s.IsOpenShift() { + depcon, err = k8s.GetDeploymentConfigs(namespace) + if err != nil { + log.Errorf("Error fetching DeploymentConfigs per namespace %s: %s", namespace, err) + errChan <- err + } + } + }() + + go func() { + defer wg.Done() + 
var err error + fulset, err = k8s.GetStatefulSets(namespace) + if err != nil { + log.Errorf("Error fetching StatefulSets per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + go func() { + defer wg.Done() + var err error + conjbs, err = k8s.GetCronJobs(namespace) + if err != nil { + log.Errorf("Error fetching CronJobs per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + go func() { + defer wg.Done() + var err error + jbs, err = k8s.GetJobs(namespace) + if err != nil { + log.Errorf("Error fetching Jobs per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + wg.Wait() + if len(errChan) != 0 { + err := <-errChan + return ws, err + } + + // Key: name of controller; Value: type of controller + controllers := map[string]string{} + + // Find controllers from pods + for _, pod := range pods { + if len(pod.OwnerReferences) != 0 { + for _, ref := range pod.OwnerReferences { + if ref.Controller != nil && *ref.Controller { + if _, exist := controllers[ref.Name]; !exist { + controllers[ref.Name] = ref.Kind + } else { + if controllers[ref.Name] != ref.Kind { + controllers[ref.Name] = controllerPriority(controllers[ref.Name], ref.Kind) + } + } + } + } + } else { + if _, exist := controllers[pod.Name]; !exist { + // Pod without controller + controllers[pod.Name] = "Pod" + } + } + } + + // Resolve ReplicaSets from Deployments + // Resolve ReplicationControllers from DeploymentConfigs + // Resolve Jobs from CronJobs + for cname, ctype := range controllers { + if ctype == "ReplicaSet" { + found := false + iFound := -1 + for i, rs := range repset { + if rs.Name == cname { + iFound = i + found = true + break + } + } + if found && len(repset[iFound].OwnerReferences) > 0 { + for _, ref := range repset[iFound].OwnerReferences { + if ref.Controller != nil && *ref.Controller { + // Delete the child ReplicaSet and add the parent controller + if _, exist := controllers[ref.Name]; !exist { + controllers[ref.Name] = ref.Kind + } else { + if controllers[ref.Name] != ref.Kind { + controllers[ref.Name] = controllerPriority(controllers[ref.Name], ref.Kind) + } + } + delete(controllers, cname) + } + } + } + } + if ctype == "ReplicationController" { + found := false + iFound := -1 + for i, rc := range repcon { + if rc.Name == cname { + iFound = i + found = true + break + } + } + if found && len(repcon[iFound].OwnerReferences) > 0 { + for _, ref := range repcon[iFound].OwnerReferences { + if ref.Controller != nil && *ref.Controller { + // Delete the child ReplicationController and add the parent controller + if _, exist := controllers[ref.Name]; !exist { + controllers[ref.Name] = ref.Kind + } else { + if controllers[ref.Name] != ref.Kind { + controllers[ref.Name] = controllerPriority(controllers[ref.Name], ref.Kind) + } + } + delete(controllers, cname) + } + } + } + } + if ctype == "Job" { + found := false + iFound := -1 + for i, jb := range jbs { + if jb.Name == cname { + iFound = i + found = true + break + } + } + if found && len(jbs[iFound].OwnerReferences) > 0 { + for _, ref := range jbs[iFound].OwnerReferences { + if ref.Controller != nil && *ref.Controller { + // Delete the child Job and add the parent controller + if _, exist := controllers[ref.Name]; !exist { + controllers[ref.Name] = ref.Kind + } else { + if controllers[ref.Name] != ref.Kind { + controllers[ref.Name] = controllerPriority(controllers[ref.Name], ref.Kind) + } + } + // Jobs are special as deleting CronJob parent doesn't delete children + // So we need to check that parent exists before to delete children 
controller + cnExist := false + for _, cnj := range conjbs { + if cnj.Name == ref.Name { + cnExist = true + break + } + } + if cnExist { + delete(controllers, cname) + } + } + } + } + } + } + + // Cornercase, check for controllers without pods, to show them as a workload + var selector labels.Selector + var selErr error + if labelSelector != "" { + selector, selErr = labels.Parse(labelSelector) + if selErr != nil { + log.Errorf("%s can not be processed as selector: %v", labelSelector, selErr) + } + } + for _, d := range dep { + selectorCheck := true + if selector != nil { + selectorCheck = selector.Matches(labels.Set(d.Spec.Template.Labels)) + } + if _, exist := controllers[d.Name]; !exist && selectorCheck { + controllers[d.Name] = "Deployment" + } + } + for _, rs := range repset { + selectorCheck := true + if selector != nil { + selectorCheck = selector.Matches(labels.Set(rs.Spec.Template.Labels)) + } + if _, exist := controllers[rs.Name]; !exist && len(rs.OwnerReferences) == 0 && selectorCheck { + controllers[rs.Name] = "ReplicaSet" + } + } + for _, dc := range depcon { + selectorCheck := true + if selector != nil { + selectorCheck = selector.Matches(labels.Set(dc.Spec.Template.Labels)) + } + if _, exist := controllers[dc.Name]; !exist && selectorCheck { + controllers[dc.Name] = "DeploymentConfig" + } + } + for _, rc := range repcon { + selectorCheck := true + if selector != nil { + selectorCheck = selector.Matches(labels.Set(rc.Spec.Template.Labels)) + } + if _, exist := controllers[rc.Name]; !exist && len(rc.OwnerReferences) == 0 && selectorCheck { + controllers[rc.Name] = "ReplicationController" + } + } + for _, fs := range fulset { + selectorCheck := true + if selector != nil { + selectorCheck = selector.Matches(labels.Set(fs.Spec.Template.Labels)) + } + if _, exist := controllers[fs.Name]; !exist && selectorCheck { + controllers[fs.Name] = "StatefulSet" + } + } + + // Build workloads from controllers + var cnames []string + for k := range controllers { + cnames = append(cnames, k) + } + sort.Strings(cnames) + for _, cname := range cnames { + w := &models.Workload{ + Pods: models.Pods{}, + Services: models.Services{}, + } + ctype := controllers[cname] + // Flag to add a controller if it is found + cnFound := true + switch ctype { + case "Deployment": + found := false + iFound := -1 + for i, dp := range dep { + if dp.Name == cname { + found = true + iFound = i + break + } + } + if found { + selector := labels.Set(dep[iFound].Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseDeployment(&dep[iFound]) + } else { + log.Errorf("Workload %s is not found as Deployment", cname) + cnFound = false + } + case "ReplicaSet": + found := false + iFound := -1 + for i, rs := range repset { + if rs.Name == cname { + found = true + iFound = i + break + } + } + if found { + selector := labels.Set(repset[iFound].Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseReplicaSet(&repset[iFound]) + } else { + log.Errorf("Workload %s is not found as ReplicaSet", cname) + cnFound = false + } + case "ReplicationController": + found := false + iFound := -1 + for i, rc := range repcon { + if rc.Name == cname { + found = true + iFound = i + break + } + } + if found { + selector := labels.Set(repcon[iFound].Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseReplicationController(&repcon[iFound]) + } else { + log.Errorf("Workload %s is not found as 
ReplicationController", cname) + cnFound = false + } + case "DeploymentConfig": + found := false + iFound := -1 + for i, dc := range depcon { + if dc.Name == cname { + found = true + iFound = i + break + } + } + if found { + selector := labels.Set(depcon[iFound].Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseDeploymentConfig(&depcon[iFound]) + } else { + log.Errorf("Workload %s is not found as DeploymentConfig", cname) + cnFound = false + } + case "StatefulSet": + found := false + iFound := -1 + for i, fs := range fulset { + if fs.Name == cname { + found = true + iFound = i + break + } + } + if found { + selector := labels.Set(fulset[iFound].Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseStatefulSet(&fulset[iFound]) + } else { + log.Errorf("Workload %s is not found as StatefulSet", cname) + cnFound = false + } + case "Pod": + found := false + iFound := -1 + for i, pod := range pods { + if pod.Name == cname { + found = true + iFound = i + break + } + } + if found { + w.SetPods([]v1.Pod{pods[iFound]}) + w.ParsePod(&pods[iFound]) + } else { + log.Errorf("Workload %s is not found as Pod", cname) + cnFound = false + } + case "Job": + found := false + iFound := -1 + for i, jb := range jbs { + if jb.Name == cname { + found = true + iFound = i + break + } + } + if found { + selector := labels.Set(jbs[iFound].Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseJob(&jbs[iFound]) + } else { + log.Errorf("Workload %s is not found as Job", cname) + cnFound = false + } + case "CronJob": + found := false + iFound := -1 + for i, cjb := range conjbs { + if cjb.Name == cname { + found = true + iFound = i + break + } + } + if found { + selector := labels.Set(conjbs[iFound].Spec.JobTemplate.Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseCronJob(&conjbs[iFound]) + } else { + log.Warningf("Workload %s is not found as CronJob (CronJob could be deleted but children are still in the namespace)", cname) + cnFound = false + } + default: + cPods := kubernetes.FilterPodsForController(cname, ctype, pods) + w.SetPods(cPods) + w.ParsePods(cname, ctype, cPods) + } + if cnFound { + ws = append(ws, w) + } + } + return ws, nil +} + +func fetchWorkload(k8s kubernetes.IstioClientInterface, namespace string, workloadName string) (*models.Workload, error) { + var pods []v1.Pod + var repcon []v1.ReplicationController + var dep *v1beta1.Deployment + var repset []v1beta2.ReplicaSet + var depcon *osappsv1.DeploymentConfig + var fulset *v1beta2.StatefulSet + var jbs []batch_v1.Job + var conjbs []batch_v1beta1.CronJob + + wl := &models.Workload{ + Pods: models.Pods{}, + Services: models.Services{}, + } + + wg := sync.WaitGroup{} + wg.Add(8) + errChan := make(chan error, 8) + + go func() { + defer wg.Done() + var err error + pods, err = k8s.GetPods(namespace, "") + if err != nil { + log.Errorf("Error fetching Pods per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + go func() { + defer wg.Done() + var err error + dep, err = k8s.GetDeployment(namespace, workloadName) + if err != nil { + dep = nil + } + }() + + go func() { + defer wg.Done() + var err error + repset, err = k8s.GetReplicaSets(namespace) + if err != nil { + log.Errorf("Error fetching ReplicaSets per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + go func() { + defer wg.Done() + var err error + repcon, err = 
k8s.GetReplicationControllers(namespace) + if err != nil { + log.Errorf("Error fetching GetReplicationControllers per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + go func() { + defer wg.Done() + var err error + if k8s.IsOpenShift() { + depcon, err = k8s.GetDeploymentConfig(namespace, workloadName) + if err != nil { + depcon = nil + } + } + }() + + go func() { + defer wg.Done() + var err error + fulset, err = k8s.GetStatefulSet(namespace, workloadName) + if err != nil { + fulset = nil + } + }() + + go func() { + defer wg.Done() + var err error + conjbs, err = k8s.GetCronJobs(namespace) + if err != nil { + log.Errorf("Error fetching CronJobs per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + go func() { + defer wg.Done() + var err error + jbs, err = k8s.GetJobs(namespace) + if err != nil { + log.Errorf("Error fetching Jobs per namespace %s: %s", namespace, err) + errChan <- err + } + }() + + wg.Wait() + if len(errChan) != 0 { + err := <-errChan + return wl, err + } + + // Key: name of controller; Value: type of controller + controllers := map[string]string{} + + // Find controllers from pods + for _, pod := range pods { + if len(pod.OwnerReferences) != 0 { + for _, ref := range pod.OwnerReferences { + if ref.Controller != nil && *ref.Controller { + if _, exist := controllers[ref.Name]; !exist { + controllers[ref.Name] = ref.Kind + } else { + if controllers[ref.Name] != ref.Kind { + controllers[ref.Name] = controllerPriority(controllers[ref.Name], ref.Kind) + } + } + } + } + } else { + if _, exist := controllers[pod.Name]; !exist { + // Pod without controller + controllers[pod.Name] = "Pod" + } + } + } + + // Resolve ReplicaSets from Deployments + // Resolve ReplicationControllers from DeploymentConfigs + // Resolve Jobs from CronJobs + for cname, ctype := range controllers { + if ctype == "ReplicaSet" { + found := false + iFound := -1 + for i, rs := range repset { + if rs.Name == cname { + iFound = i + found = true + break + } + } + if found && len(repset[iFound].OwnerReferences) > 0 { + for _, ref := range repset[iFound].OwnerReferences { + if ref.Controller != nil && *ref.Controller { + // Delete the child ReplicaSet and add the parent controller + if _, exist := controllers[ref.Name]; !exist { + controllers[ref.Name] = ref.Kind + } else { + if controllers[ref.Name] != ref.Kind { + controllers[ref.Name] = controllerPriority(controllers[ref.Name], ref.Kind) + } + } + delete(controllers, cname) + } + } + } + } + if ctype == "ReplicationController" { + found := false + iFound := -1 + for i, rc := range repcon { + if rc.Name == cname { + iFound = i + found = true + break + } + } + if found && len(repcon[iFound].OwnerReferences) > 0 { + for _, ref := range repcon[iFound].OwnerReferences { + if ref.Controller != nil && *ref.Controller { + // Delete the child ReplicationController and add the parent controller + if _, exist := controllers[ref.Name]; !exist { + controllers[ref.Name] = ref.Kind + } else { + if controllers[ref.Name] != ref.Kind { + controllers[ref.Name] = controllerPriority(controllers[ref.Name], ref.Kind) + } + } + delete(controllers, cname) + } + } + } + } + if ctype == "Job" { + found := false + iFound := -1 + for i, jb := range jbs { + if jb.Name == cname { + iFound = i + found = true + break + } + } + if found && len(jbs[iFound].OwnerReferences) > 0 { + for _, ref := range jbs[iFound].OwnerReferences { + if ref.Controller != nil && *ref.Controller { + // Delete the child Job and add the parent controller + if _, exist := 
controllers[ref.Name]; !exist { + controllers[ref.Name] = ref.Kind + } else { + if controllers[ref.Name] != ref.Kind { + controllers[ref.Name] = controllerPriority(controllers[ref.Name], ref.Kind) + } + } + // Jobs are special as deleting CronJob parent doesn't delete children + // So we need to check that parent exists before to delete children controller + cnExist := false + for _, cnj := range conjbs { + if cnj.Name == ref.Name { + cnExist = true + break + } + } + if cnExist { + delete(controllers, cname) + } + } + } + } + } + } + + // Cornercase, check for controllers without pods, to show them as a workload + if dep != nil { + if _, exist := controllers[dep.Name]; !exist { + controllers[dep.Name] = "Deployment" + } + } + for _, rs := range repset { + if _, exist := controllers[rs.Name]; !exist && len(rs.OwnerReferences) == 0 { + controllers[rs.Name] = "ReplicaSet" + } + } + if depcon != nil { + if _, exist := controllers[depcon.Name]; !exist { + controllers[depcon.Name] = "DeploymentConfig" + } + } + for _, rc := range repcon { + if _, exist := controllers[rc.Name]; !exist && len(rc.OwnerReferences) == 0 { + controllers[rc.Name] = "ReplicationController" + } + } + if fulset != nil { + if _, exist := controllers[fulset.Name]; !exist { + controllers[fulset.Name] = "StatefulSet" + } + } + + // Build workload from controllers + + if _, exist := controllers[workloadName]; exist { + w := models.Workload{ + Pods: models.Pods{}, + Services: models.Services{}, + } + ctype := controllers[workloadName] + // Flag to add a controller if it is found + cnFound := true + switch ctype { + case "Deployment": + if dep.Name == workloadName { + selector := labels.Set(dep.Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseDeployment(dep) + } else { + log.Errorf("Workload %s is not found as Deployment", workloadName) + cnFound = false + } + case "ReplicaSet": + found := false + iFound := -1 + for i, rs := range repset { + if rs.Name == workloadName { + found = true + iFound = i + break + } + } + if found { + selector := labels.Set(repset[iFound].Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseReplicaSet(&repset[iFound]) + } else { + log.Errorf("Workload %s is not found as ReplicaSet", workloadName) + cnFound = false + } + case "ReplicationController": + found := false + iFound := -1 + for i, rc := range repcon { + if rc.Name == workloadName { + found = true + iFound = i + break + } + } + if found { + selector := labels.Set(repcon[iFound].Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseReplicationController(&repcon[iFound]) + } else { + log.Errorf("Workload %s is not found as ReplicationController", workloadName) + cnFound = false + } + case "DeploymentConfig": + if depcon.Name == workloadName { + selector := labels.Set(depcon.Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseDeploymentConfig(depcon) + } else { + log.Errorf("Workload %s is not found as DeploymentConfig", workloadName) + cnFound = false + } + case "StatefulSet": + if fulset.Name == workloadName { + selector := labels.Set(fulset.Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseStatefulSet(fulset) + } else { + log.Errorf("Workload %s is not found as StatefulSet", workloadName) + cnFound = false + } + case "Pod": + found := false + iFound := -1 + for i, pod := range 
pods { + if pod.Name == workloadName { + found = true + iFound = i + break + } + } + if found { + w.SetPods([]v1.Pod{pods[iFound]}) + w.ParsePod(&pods[iFound]) + } else { + log.Errorf("Workload %s is not found as Pod", workloadName) + cnFound = false + } + case "Job": + found := false + iFound := -1 + for i, jb := range jbs { + if jb.Name == workloadName { + found = true + iFound = i + break + } + } + if found { + selector := labels.Set(jbs[iFound].Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseJob(&jbs[iFound]) + } else { + log.Errorf("Workload %s is not found as Job", workloadName) + cnFound = false + } + case "CronJob": + found := false + iFound := -1 + for i, cjb := range conjbs { + if cjb.Name == workloadName { + found = true + iFound = i + break + } + } + if found { + selector := labels.Set(conjbs[iFound].Spec.JobTemplate.Spec.Template.Labels).AsSelector() + w.SetPods(kubernetes.FilterPodsForSelector(selector, pods)) + w.ParseCronJob(&conjbs[iFound]) + } else { + log.Warningf("Workload %s is not found as CronJob (CronJob could be deleted but children are still in the namespace)", workloadName) + cnFound = false + } + default: + cPods := kubernetes.FilterPodsForController(workloadName, ctype, pods) + w.SetPods(cPods) + w.ParsePods(workloadName, ctype, cPods) + } + if cnFound { + return &w, nil + } + } + return wl, kubernetes.NewNotFound(workloadName, "Kiali", "Workload") +} + +// KIALI-1730 +// This method is used to decide the priority of the controller in the cornercase when two controllers have same labels +// on the selector. Note that this is a situation that user should control as it is described in the documentation: +// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +// But Istio only identifies one controller as workload (it doesn't note which one). +// Kiali can select one on the list of workloads and other in the details and this should be consistent. 
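+// Illustrative example (hypothetical names): if a Pod carries owner references that resolve the same
+// controller name to both "ReplicaSet" and "Deployment", then controllerPriority("ReplicaSet", "Deployment")
+// returns "Deployment" (6 outranks 4 in the table below), so the workload list and the workload details
+// report the same controller.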
+var controllerOrder = map[string]int{ + "Deployment": 6, + "DeploymentConfig": 5, + "ReplicaSet": 4, + "ReplicationController": 3, + "StatefulSet": 2, + "Job": 1, + "DaemonSet": 0, + "Pod": -1, +} + +func controllerPriority(type1, type2 string) string { + w1, e1 := controllerOrder[type1] + if !e1 { + log.Errorf("This controller %s is assigned in a Pod and it's not properly managed", type1) + } + w2, e2 := controllerOrder[type2] + if !e2 { + log.Errorf("This controller %s is assigned in a Pod and it's not properly managed", type2) + } + if w1 >= w2 { + return type1 + } else { + return type2 + } +} + +// fillCustomDashboardRefs finds all dashboard IDs and Titles associated to this workload and add them to the model +func (in *WorkloadService) fillCustomDashboardRefs(namespace string, workload *models.Workload) { + uniqueRefsList := getUniqueRuntimes(workload.Pods) + mon, err := kubernetes.NewKialiMonitoringClient() + if err != nil { + // Do not fail the whole query, just log & return + log.Error("Cannot initialize Kiali Monitoring Client") + return + } + dash := NewDashboardsService(mon, in.prom) + workload.Runtimes = dash.buildRuntimesList(namespace, uniqueRefsList) +} + +func getUniqueRuntimes(pods models.Pods) []string { + // Get uniqueness from plain list rather than map to preserve ordering; anyway, very low amount of objects is expected + uniqueRefs := []string{} + for _, pod := range pods { + for _, ref := range pod.RuntimesAnnotation { + if ref != "" { + exists := false + for _, existingRef := range uniqueRefs { + if ref == existingRef { + exists = true + break + } + } + if !exists { + uniqueRefs = append(uniqueRefs, ref) + } + } + } + } + return uniqueRefs +} diff --git a/vendor/github.com/kiali/kiali/config/authentication.go b/vendor/github.com/kiali/kiali/config/authentication.go new file mode 100644 index 000000000..39a7ddc79 --- /dev/null +++ b/vendor/github.com/kiali/kiali/config/authentication.go @@ -0,0 +1,63 @@ +package config + +import ( + "net/http" + "strings" + + "github.com/kiali/kiali/log" +) + +func AuthenticationHandler(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + statusCode := http.StatusOK + errMsg := "" + conf := Get() + if strings.Contains(r.Header.Get("Authorization"), "Bearer") { + user, err := ValidateToken(strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ")) + // Internal header used to propagate the subject of the request for audit purposes + r.Header.Add("Kiali-User", user) + if err != nil { + log.Warning("Token error: ", err) + statusCode = http.StatusUnauthorized + } + } else if conf.Server.Credentials.Username != "" || conf.Server.Credentials.Password != "" { + u, p, ok := r.BasicAuth() + // Internal header used to propagate the subject of the request for audit purposes + r.Header.Add("Kiali-User", u) + if !ok || conf.Server.Credentials.Username != u || conf.Server.Credentials.Password != p { + statusCode = http.StatusUnauthorized + if conf.Server.Credentials.AllowAnonymous { + // we really should never get here - the credentials config should not have validated at startup + // if anonymous access is allowed with non-empty username/password. But just in case, log a message. + log.Debugf("Access to the server endpoint allows anonymous access - but user [%v] provided invalid credentials. 
Request denied.", u) + } + } + } else if conf.Server.Credentials.AllowAnonymous { + log.Trace("Access to the server endpoint is not secured with credentials - letting request come in") + } else { + statusCode = 520 // our specific error code that indicates to the client that we are missing the secret + errMsg = "Credentials are missing. Create a secret and restart Kiali. Please refer to the documentation for more details." + } + + if statusCode != http.StatusOK && errMsg == "" { + errMsg = http.StatusText(statusCode) + } + + switch statusCode { + case http.StatusOK: + next.ServeHTTP(w, r) + case http.StatusUnauthorized: + // If header exists return the value, must be 1 to use the API from Kiali + // Otherwise an empty string is returned and WWW-Authenticate will be Basic + if r.Header.Get("X-Auth-Type-Kiali-UI") == "1" { + w.Header().Set("WWW-Authenticate", "xBasic realm=\"Kiali\"") + } else { + w.Header().Set("WWW-Authenticate", "Basic realm=\"Kiali\"") + } + http.Error(w, errMsg, statusCode) + default: + http.Error(w, errMsg, statusCode) + log.Errorf("Cannot send response to unauthorized user: %v (%v)", statusCode, errMsg) + } + }) +} diff --git a/vendor/github.com/kiali/kiali/config/config.go b/vendor/github.com/kiali/kiali/config/config.go new file mode 100644 index 000000000..552c89769 --- /dev/null +++ b/vendor/github.com/kiali/kiali/config/config.go @@ -0,0 +1,392 @@ +package config + +import ( + "fmt" + "io/ioutil" + "os" + "regexp" + "strconv" + "strings" + "time" + + yaml "gopkg.in/yaml.v2" + + "github.com/kiali/kiali/config/security" + "github.com/kiali/kiali/log" +) + +// Environment vars can define some default values. +// NOTE: If you add a new variable, don't forget to update README.adoc +const ( + EnvIdentityCertFile = "IDENTITY_CERT_FILE" + EnvIdentityPrivateKeyFile = "IDENTITY_PRIVATE_KEY_FILE" + + EnvPrometheusServiceURL = "PROMETHEUS_SERVICE_URL" + EnvPrometheusCustomMetricsURL = "PROMETHEUS_CUSTOM_METRICS_URL" + EnvInCluster = "IN_CLUSTER" + EnvIstioIdentityDomain = "ISTIO_IDENTITY_DOMAIN" + EnvIstioSidecarAnnotation = "ISTIO_SIDECAR_ANNOTATION" + EnvIstioUrlServiceVersion = "ISTIO_URL_SERVICE_VERSION" + EnvApiNamespacesExclude = "API_NAMESPACES_EXCLUDE" + + EnvServerAddress = "SERVER_ADDRESS" + EnvServerPort = "SERVER_PORT" + EnvServerCredentialsUsername = "SERVER_CREDENTIALS_USERNAME" + EnvServerCredentialsPassword = "SERVER_CREDENTIALS_PASSWORD" + EnvServerAllowAnonymousAccess = "SERVER_ALLOW_ANONYMOUS_ACCESS" + EnvWebRoot = "SERVER_WEB_ROOT" + EnvServerStaticContentRootDirectory = "SERVER_STATIC_CONTENT_ROOT_DIRECTORY" + EnvServerCORSAllowAll = "SERVER_CORS_ALLOW_ALL" + EnvServerAuditLog = "SERVER_AUDIT_LOG" + + EnvGrafanaDisplayLink = "GRAFANA_DISPLAY_LINK" + EnvGrafanaURL = "GRAFANA_URL" + EnvGrafanaServiceNamespace = "GRAFANA_SERVICE_NAMESPACE" + EnvGrafanaService = "GRAFANA_SERVICE" + EnvGrafanaWorkloadDashboardPattern = "GRAFANA_WORKLOAD_DASHBOARD_PATTERN" + EnvGrafanaServiceDashboardPattern = "GRAFANA_SERVICE_DASHBOARD_PATTERN" + EnvGrafanaVarNamespace = "GRAFANA_VAR_NAMESPACE" + EnvGrafanaVarService = "GRAFANA_VAR_SERVICE" + EnvGrafanaVarWorkload = "GRAFANA_VAR_WORKLOAD" + EnvGrafanaAPIKey = "GRAFANA_API_KEY" + EnvGrafanaUsername = "GRAFANA_USERNAME" + EnvGrafanaPassword = "GRAFANA_PASSWORD" + + EnvJaegerURL = "JAEGER_URL" + EnvJaegerService = "JAEGER_SERVICE" + + EnvLoginTokenSigningKey = "LOGIN_TOKEN_SIGNING_KEY" + EnvLoginTokenExpirationSeconds = "LOGIN_TOKEN_EXPIRATION_SECONDS" + EnvIstioNamespace = "ISTIO_NAMESPACE" + + EnvIstioLabelNameApp = 
"ISTIO_LABEL_NAME_APP" + EnvIstioLabelNameVersion = "ISTIO_LABEL_NAME_VERSION" + + EnvKubernetesBurst = "KUBERNETES_BURST" + EnvKubernetesQPS = "KUBERNETES_QPS" + EnvKubernetesCacheEnabled = "KUBERNETES_CACHE_ENABLED" + EnvKubernetesCacheDuration = "KUBERNETES_CACHE_DURATION" +) + +// The versions that Kiali requires +const ( + IstioVersionSupported = ">= 1.0" + MaistraVersionSupported = ">= 0.1.0" +) + +// Global configuration for the application. +var configuration *Config + +// Server configuration +type Server struct { + Address string `yaml:",omitempty"` + Port int `yaml:",omitempty"` + Credentials security.Credentials `yaml:",omitempty"` + WebRoot string `yaml:"web_root,omitempty"` + StaticContentRootDirectory string `yaml:"static_content_root_directory,omitempty"` + CORSAllowAll bool `yaml:"cors_allow_all,omitempty"` + AuditLog bool `yaml:"audit_log,omitempty"` +} + +// GrafanaConfig describes configuration used for Grafana links +type GrafanaConfig struct { + DisplayLink bool `yaml:"display_link"` + URL string `yaml:"url"` + ServiceNamespace string `yaml:"service_namespace"` + Service string `yaml:"service"` + WorkloadDashboardPattern string `yaml:"workload_dashboard_pattern"` + ServiceDashboardPattern string `yaml:"service_dashboard_pattern"` + VarNamespace string `yaml:"var_namespace"` + VarService string `yaml:"var_service"` + VarWorkload string `yaml:"var_workload"` + APIKey string `yaml:"api_key"` + Username string `yaml:"username"` + Password string `yaml:"password"` +} + +// JaegerConfig describes configuration used for jaeger links +type JaegerConfig struct { + URL string `yaml:"url"` + Service string `yaml:"service"` +} + +// IstioConfig describes configuration used for istio links +type IstioConfig struct { + UrlServiceVersion string `yaml:"url_service_version"` + IstioIdentityDomain string `yaml:"istio_identity_domain,omitempty"` + IstioSidecarAnnotation string `yaml:"istio_sidecar_annotation,omitempty"` +} + +// ExternalServices holds configurations for other systems that Kiali depends on +type ExternalServices struct { + Istio IstioConfig `yaml:"istio,omitempty"` + PrometheusServiceURL string `yaml:"prometheus_service_url,omitempty"` + PrometheusCustomMetricsURL string `yaml:"prometheus_custom_metrics_url,omitempty"` + Grafana GrafanaConfig `yaml:"grafana,omitempty"` + Jaeger JaegerConfig `yaml:"jaeger,omitempty"` +} + +// LoginToken holds config used in token-based authentication +type LoginToken struct { + SigningKey []byte `yaml:"signing_key,omitempty"` + ExpirationSeconds int64 `yaml:"expiration_seconds,omitempty"` +} + +// IstioLabels holds configuration about the labels required by Istio +type IstioLabels struct { + AppLabelName string `yaml:"app_label_name,omitempty" json:"appLabelName"` + VersionLabelName string `yaml:"version_label_name,omitempty" json:"versionLabelName"` +} + +// Kubernetes client configuration +type KubernetesConfig struct { + Burst int `yaml:"burst,omitempty"` + QPS float32 `yaml:"qps,omitempty"` + CacheEnabled bool `yaml:"cache_enabled,omitempty"` + CacheDuration int64 `yaml:"cache_duration,omitempty"` +} + +// Exclude Blacklist holds regex strings defining a blacklist +type ApiConfig struct { + Namespaces ApiNamespacesConfig +} + +// Exclude Blacklist holds regex strings defining a blacklist +type ApiNamespacesConfig struct { + Exclude []string +} + +// Config defines full YAML configuration. 
+type Config struct { + Identity security.Identity `yaml:",omitempty"` + Server Server `yaml:",omitempty"` + InCluster bool `yaml:"in_cluster,omitempty"` + ExternalServices ExternalServices `yaml:"external_services,omitempty"` + LoginToken LoginToken `yaml:"login_token,omitempty"` + IstioNamespace string `yaml:"istio_namespace,omitempty"` + IstioLabels IstioLabels `yaml:"istio_labels,omitempty"` + KubernetesConfig KubernetesConfig `yaml:"kubernetes_config,omitempty"` + API ApiConfig `yaml:"api,omitempty"` +} + +// NewConfig creates a default Config struct +func NewConfig() (c *Config) { + c = new(Config) + + c.Identity.CertFile = getDefaultString(EnvIdentityCertFile, "") + c.Identity.PrivateKeyFile = getDefaultString(EnvIdentityPrivateKeyFile, "") + c.InCluster = getDefaultBool(EnvInCluster, true) + c.IstioNamespace = strings.TrimSpace(getDefaultString(EnvIstioNamespace, "istio-system")) + c.IstioLabels.AppLabelName = strings.TrimSpace(getDefaultString(EnvIstioLabelNameApp, "app")) + c.IstioLabels.VersionLabelName = strings.TrimSpace(getDefaultString(EnvIstioLabelNameVersion, "version")) + c.API.Namespaces.Exclude = getDefaultStringArray(EnvApiNamespacesExclude, "istio-operator,kube.*,openshift.*,ibm.*") + + // Server configuration + c.Server.Address = strings.TrimSpace(getDefaultString(EnvServerAddress, "")) + c.Server.Port = getDefaultInt(EnvServerPort, 20000) + c.Server.Credentials = security.Credentials{ + Username: getDefaultString(EnvServerCredentialsUsername, ""), + Password: getDefaultString(EnvServerCredentialsPassword, ""), + AllowAnonymous: getDefaultBool(EnvServerAllowAnonymousAccess, false), + } + + c.Server.WebRoot = strings.TrimSpace(getDefaultString(EnvWebRoot, "/")) + c.Server.StaticContentRootDirectory = strings.TrimSpace(getDefaultString(EnvServerStaticContentRootDirectory, "/opt/kiali/console")) + c.Server.CORSAllowAll = getDefaultBool(EnvServerCORSAllowAll, false) + c.Server.AuditLog = getDefaultBool(EnvServerAuditLog, true) + + // Prometheus configuration + c.ExternalServices.PrometheusServiceURL = strings.TrimSpace(getDefaultString(EnvPrometheusServiceURL, "http://prometheus.istio-system:9090")) + c.ExternalServices.PrometheusCustomMetricsURL = strings.TrimSpace(getDefaultString(EnvPrometheusCustomMetricsURL, c.ExternalServices.PrometheusServiceURL)) + + // Grafana Configuration + c.ExternalServices.Grafana.DisplayLink = getDefaultBool(EnvGrafanaDisplayLink, true) + c.ExternalServices.Grafana.URL = strings.TrimSpace(getDefaultString(EnvGrafanaURL, "")) + c.ExternalServices.Grafana.ServiceNamespace = strings.TrimSpace(getDefaultString(EnvGrafanaServiceNamespace, "istio-system")) + c.ExternalServices.Grafana.Service = strings.TrimSpace(getDefaultString(EnvGrafanaService, "grafana")) + c.ExternalServices.Grafana.WorkloadDashboardPattern = strings.TrimSpace(getDefaultString(EnvGrafanaWorkloadDashboardPattern, "Istio%20Workload%20Dashboard")) + c.ExternalServices.Grafana.ServiceDashboardPattern = strings.TrimSpace(getDefaultString(EnvGrafanaServiceDashboardPattern, "Istio%20Service%20Dashboard")) + c.ExternalServices.Grafana.VarNamespace = strings.TrimSpace(getDefaultString(EnvGrafanaVarNamespace, "var-namespace")) + c.ExternalServices.Grafana.VarService = strings.TrimSpace(getDefaultString(EnvGrafanaVarService, "var-service")) + c.ExternalServices.Grafana.VarWorkload = strings.TrimSpace(getDefaultString(EnvGrafanaVarWorkload, "var-workload")) + c.ExternalServices.Grafana.APIKey = strings.TrimSpace(getDefaultString(EnvGrafanaAPIKey, "")) + 
c.ExternalServices.Grafana.Username = strings.TrimSpace(getDefaultString(EnvGrafanaUsername, "")) + c.ExternalServices.Grafana.Password = strings.TrimSpace(getDefaultString(EnvGrafanaPassword, "")) + if c.ExternalServices.Grafana.Username != "" && c.ExternalServices.Grafana.Password == "" { + log.Error("Grafana username (\"GRAFANA_USERNAME\") requires that Grafana password (\"GRAFANA_PASSWORD\") is set.") + } + + // Jaeger Configuration + c.ExternalServices.Jaeger.URL = strings.TrimSpace(getDefaultString(EnvJaegerURL, "")) + c.ExternalServices.Jaeger.Service = strings.TrimSpace(getDefaultString(EnvJaegerService, "jaeger-query")) + + // Istio Configuration + c.ExternalServices.Istio.IstioIdentityDomain = strings.TrimSpace(getDefaultString(EnvIstioIdentityDomain, "svc.cluster.local")) + c.ExternalServices.Istio.IstioSidecarAnnotation = strings.TrimSpace(getDefaultString(EnvIstioSidecarAnnotation, "sidecar.istio.io/status")) + c.ExternalServices.Istio.UrlServiceVersion = strings.TrimSpace(getDefaultString(EnvIstioUrlServiceVersion, "http://istio-pilot:8080/version")) + + // Token-based authentication Configuration + c.LoginToken.SigningKey = []byte(strings.TrimSpace(getDefaultString(EnvLoginTokenSigningKey, "kiali"))) + c.LoginToken.ExpirationSeconds = getDefaultInt64(EnvLoginTokenExpirationSeconds, 24*3600) + + // Kubernetes client Configuration + c.KubernetesConfig.Burst = getDefaultInt(EnvKubernetesBurst, 200) + c.KubernetesConfig.QPS = getDefaultFloat32(EnvKubernetesQPS, 175) + c.KubernetesConfig.CacheEnabled = getDefaultBool(EnvKubernetesCacheEnabled, false) + c.KubernetesConfig.CacheDuration = getDefaultInt64(EnvKubernetesCacheDuration, time.Duration(5*time.Minute).Nanoseconds()) + + trimmedExclusionPatterns := []string{} + for _, entry := range c.API.Namespaces.Exclude { + exclusionPattern := strings.TrimSpace(entry) + if _, err := regexp.Compile(exclusionPattern); err != nil { + log.Errorf("Invalid namespace exclude entry, [%s] is not a valid regex pattern: %v", exclusionPattern, err) + } else { + trimmedExclusionPatterns = append(trimmedExclusionPatterns, strings.TrimSpace(exclusionPattern)) + } + } + c.API.Namespaces.Exclude = trimmedExclusionPatterns + + return +} + +// Get the global Config +func Get() (conf *Config) { + return configuration +} + +// Set the global Config +func Set(conf *Config) { + configuration = conf +} + +func getDefaultString(envvar string, defaultValue string) (retVal string) { + retVal = os.Getenv(envvar) + if retVal == "" { + retVal = defaultValue + } + return +} + +func getDefaultStringArray(envvar string, defaultValue string) (retVal []string) { + csv := os.Getenv(envvar) + if csv == "" { + csv = defaultValue + } + retVal = strings.Split(csv, ",") + return +} + +func getDefaultInt(envvar string, defaultValue int) (retVal int) { + retValString := os.Getenv(envvar) + if retValString == "" { + retVal = defaultValue + } else { + if num, err := strconv.Atoi(retValString); err != nil { + log.Warningf("Invalid number for envvar [%v]. err=%v", envvar, err) + retVal = defaultValue + } else { + retVal = num + } + } + return +} + +func getDefaultInt64(envvar string, defaultValue int64) (retVal int64) { + retValString := os.Getenv(envvar) + if retValString == "" { + retVal = defaultValue + } else { + if num, err := strconv.ParseInt(retValString, 10, 64); err != nil { + log.Warningf("Invalid number for envvar [%v]. 
err=%v", envvar, err) + retVal = defaultValue + } else { + retVal = num + } + } + return +} + +func getDefaultBool(envvar string, defaultValue bool) (retVal bool) { + retValString := os.Getenv(envvar) + if retValString == "" { + retVal = defaultValue + } else { + if b, err := strconv.ParseBool(retValString); err != nil { + log.Warningf("Invalid boolean for envvar [%v]. err=%v", envvar, err) + retVal = defaultValue + } else { + retVal = b + } + } + return +} + +func getDefaultFloat32(envvar string, defaultValue float32) (retVal float32) { + retValString := os.Getenv(envvar) + if retValString == "" { + retVal = defaultValue + } else { + if f, err := strconv.ParseFloat(retValString, 32); err != nil { + log.Warningf("Invalid float number for envvar [%v]. err=%v", envvar, err) + retVal = defaultValue + } else { + retVal = float32(f) + } + } + return +} + +// String marshals the given Config into a YAML string +func (conf Config) String() (str string) { + str, err := Marshal(&conf) + if err != nil { + str = fmt.Sprintf("Failed to marshal config to string. err=%v", err) + log.Debugf(str) + } + + return +} + +// Unmarshal parses the given YAML string and returns its Config object representation. +func Unmarshal(yamlString string) (conf *Config, err error) { + conf = NewConfig() + err = yaml.Unmarshal([]byte(yamlString), &conf) + if err != nil { + return nil, fmt.Errorf("Failed to parse yaml data. error=%v", err) + } + return +} + +// Marshal converts the Config object and returns its YAML string. +func Marshal(conf *Config) (yamlString string, err error) { + yamlBytes, err := yaml.Marshal(&conf) + if err != nil { + return "", fmt.Errorf("Failed to produce yaml. error=%v", err) + } + + yamlString = string(yamlBytes) + return +} + +// LoadFromFile reads the YAML from the given file, parses the content, and returns its Config object representation. +func LoadFromFile(filename string) (conf *Config, err error) { + log.Debugf("Reading YAML config from [%s]", filename) + fileContent, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("Failed to load config file [%v]. error=%v", filename, err) + } + + return Unmarshal(string(fileContent)) +} + +// SaveToFile converts the Config object and stores its YAML string into the given file, overwriting any data that is in the file. +func SaveToFile(filename string, conf *Config) (err error) { + fileContent, err := Marshal(conf) + if err != nil { + return fmt.Errorf("Failed to save config file [%v]. error=%v", filename, err) + } + + log.Debugf("Writing YAML config to [%s]", filename) + err = ioutil.WriteFile(filename, []byte(fileContent), 0640) + return +} diff --git a/vendor/github.com/kiali/kiali/config/security/config_security.go b/vendor/github.com/kiali/kiali/config/security/config_security.go new file mode 100644 index 000000000..424b131b3 --- /dev/null +++ b/vendor/github.com/kiali/kiali/config/security/config_security.go @@ -0,0 +1,80 @@ +package security + +import ( + "encoding/base64" + "fmt" +) + +// Identity security details about a client. +type Identity struct { + CertFile string `yaml:"cert_file"` + PrivateKeyFile string `yaml:"private_key_file"` +} + +// Credentials provides information when needing to authenticate to remote endpoints. +// Credentials are either a username/password or a bearer token, but not both. 
+type Credentials struct {
+	Username       string `yaml:",omitempty"`
+	Password       string `yaml:",omitempty"`
+	Token          string `yaml:",omitempty"`
+	AllowAnonymous bool   `yaml:"allow_anonymous,omitempty"`
+}
+
+// TLS options - SkipCertificateValidation will disable server certificate verification - the client
+// will accept any certificate presented by the server and any host name in that certificate.
+type TLS struct {
+	SkipCertificateValidation bool `yaml:"skip_certificate_validation,omitempty"`
+}
+
+// ValidateCredentials makes sure that if username is provided, so is password (and vice versa)
+// and also makes sure if username/password is provided that token is not (and vice versa).
+// It is valid to have nothing defined (no username, password, nor token), but if nothing is
+// defined and the "AllowAnonymous" flag is false, this usually means the person who
+// installed Kiali most likely forgot to set credentials - therefore access should always be denied.
+// If nothing is defined and the "AllowAnonymous" flag is true, this means anonymous access is specifically allowed.
+// If the "AllowAnonymous" flag is true but non-empty credentials are defined, an error results.
+func (c *Credentials) ValidateCredentials() error {
+	if c.Username != "" && c.Password == "" {
+		return fmt.Errorf("A password must be provided if a username is set")
+	}
+
+	if c.Username == "" && c.Password != "" {
+		return fmt.Errorf("A username must be provided if a password is set")
+	}
+
+	if c.Username != "" && c.Token != "" {
+		return fmt.Errorf("Username/password cannot be specified if a token is specified also. Only Username/Password or Token can be set but not both")
+	}
+
+	if c.AllowAnonymous && (c.Username != "" || c.Token != "") {
+		return fmt.Errorf("The 'AllowAnonymous' flag is true but non-empty credentials exist")
+	}
+
+	return nil
+}
+
+// GetHTTPAuthHeader provides the authentication header name and value (can be empty), or an error
+func (c *Credentials) GetHTTPAuthHeader() (headerName string, headerValue string, err error) {
+	// if no credentials are provided, this is fine, we are just going to do an insecure request
+	if c == nil {
+		return "", "", nil
+	}
+
+	if err := c.ValidateCredentials(); err != nil {
+		return "", "", err
+	}
+
+	if c.Token != "" {
+		headerName = "Authorization"
+		headerValue = fmt.Sprintf("Bearer %s", c.Token)
+	} else if c.Username != "" {
+		creds := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", c.Username, c.Password)))
+		headerName = "Authorization"
+		headerValue = fmt.Sprintf("Basic %s", creds)
+	} else {
+		headerName = ""
+		headerValue = ""
+	}
+
+	return headerName, headerValue, nil
+}
diff --git a/vendor/github.com/kiali/kiali/config/token.go b/vendor/github.com/kiali/kiali/config/token.go
new file mode 100644
index 000000000..4f587a826
--- /dev/null
+++ b/vendor/github.com/kiali/kiali/config/token.go
@@ -0,0 +1,77 @@
+package config
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/dgrijalva/jwt-go"
+)
+
+// Structured version of Claims Section, as referenced at
+// https://tools.ietf.org/html/rfc7519#section-4.1
+// See examples for how to use this with your own claim types
+type TokenClaim struct {
+	User string `json:"username"`
+	jwt.StandardClaims
+}
+
+// TokenGenerated tokenGenerated
+//
+// This is used for returning the token
+//
+// swagger:model TokenGenerated
+type TokenGenerated struct {
+	// The authentication token
+	// A string with the authentication token for the user
+	//
+	// example: 
zI1NiIsIsR5cCI6IkpXVCJ9.ezJ1c2VybmFtZSI6ImFkbWluIiwiZXhwIjoxNTI5NTIzNjU0fQ.PPZvRGnR6VA4v7FmgSfQcGQr-VD + // required: true + Token string `json:"token"` + // The expired time for the token + // A string with the Datetime when the token will be expired + // + // example: 2018-06-20 19:40:54.116369887 +0000 UTC m=+43224.838320603 + // required: true + ExpiredAt string `json:"expired_at"` +} + +// GenerateToken generates a signed token with an expiration of seconds +func GenerateToken(username string) (TokenGenerated, error) { + timeExpire := time.Now().Add(time.Second * time.Duration(Get().LoginToken.ExpirationSeconds)) + claim := TokenClaim{ + username, + jwt.StandardClaims{ + ExpiresAt: timeExpire.Unix(), + }, + } + + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claim) + ss, err := token.SignedString(Get().LoginToken.SigningKey) + if err != nil { + return TokenGenerated{}, err + } + + return TokenGenerated{Token: ss, ExpiredAt: timeExpire.String()}, nil +} + +// ValidateToken checks if the input token is still valid +func ValidateToken(tokenString string) (string, error) { + token, err := jwt.ParseWithClaims(tokenString, &TokenClaim{}, func(token *jwt.Token) (interface{}, error) { + return Get().LoginToken.SigningKey, nil + }) + if err != nil { + return "", err + } + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return "", fmt.Errorf("Unexpected signing method: %s", token.Header["alg"]) + } + if token.Valid { + user := "" + if sToken, ok := token.Claims.(*TokenClaim); ok { + user = sToken.User + } + return user, nil + } + return "", errors.New("Invalid token") +} diff --git a/vendor/github.com/kiali/kiali/graph/appender/appender.go b/vendor/github.com/kiali/kiali/graph/appender/appender.go new file mode 100644 index 000000000..6281ffeb4 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/appender/appender.go @@ -0,0 +1,80 @@ +package appender + +import ( + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/graph" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus" +) + +// GlobalInfo caches information relevant to a single graph. It allows +// an appender to populate the cache and then it, or another appender +// can re-use the information. A new instance is generated for graph and +// is initially empty. +type GlobalInfo struct { + Business *business.Layer + PromClient *prometheus.Client + ServiceEntries map[string]string +} + +func NewGlobalInfo() *GlobalInfo { + return &GlobalInfo{} +} + +// NamespaceInfo caches information relevant to a single namespace. It allows +// one appender to populate the cache and another to then re-use the information. +// A new instance is generated for each namespace of a single graph and is initially +// seeded with only Namespace. 
+type NamespaceInfo struct { + Namespace string // always provided + WorkloadList *models.WorkloadList +} + +func NewNamespaceInfo(namespace string) *NamespaceInfo { + return &NamespaceInfo{Namespace: namespace} +} + +func getWorkload(workloadName string, workloadList *models.WorkloadList) (*models.WorkloadListItem, bool) { + if workloadName == "" || workloadName == graph.Unknown { + return nil, false + } + + for _, workload := range workloadList.Workloads { + if workload.Name == workloadName { + return &workload, true + } + } + return nil, false +} + +func getAppWorkloads(app, version string, workloadList *models.WorkloadList) []models.WorkloadListItem { + cfg := config.Get() + appLabel := cfg.IstioLabels.AppLabelName + versionLabel := cfg.IstioLabels.VersionLabelName + + result := []models.WorkloadListItem{} + versionOk := graph.IsOK(version) + for _, workload := range workloadList.Workloads { + if appVal, ok := workload.Labels[appLabel]; ok && app == appVal { + if !versionOk { + result = append(result, workload) + } else if versionVal, ok := workload.Labels[versionLabel]; ok && version == versionVal { + result = append(result, workload) + } + } + } + return result +} + +// Appender is implemented by any code offering to append a service graph with +// supplemental information. On error the appender should panic and it will be +// handled as an error response. +type Appender interface { + // AppendGraph performs the appender work on the provided traffic map. The map + // may be initially empty. An appender is allowed to add or remove map entries. + AppendGraph(trafficMap graph.TrafficMap, globalInfo *GlobalInfo, namespaceInfo *NamespaceInfo) + + // Name returns a unique appender name and which is the name used to identify the appender (e.g in 'appenders' query param) + Name() string +} diff --git a/vendor/github.com/kiali/kiali/graph/appender/dead_node.go b/vendor/github.com/kiali/kiali/graph/appender/dead_node.go new file mode 100644 index 000000000..b39386eb3 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/appender/dead_node.go @@ -0,0 +1,129 @@ +package appender + +import ( + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/graph" +) + +const DeadNodeAppenderName = "deadNode" + +// DeadNodeAppender is responsible for removing from the graph unwanted nodes: +// - nodes for which there is no traffic reported and a backing workload that can't be found +// (presumably removed from K8S). (kiali-621) +// - this includes "unknown" +// - service nodes that are not service entries (kiali-1526) and for which there is no incoming +// error traffic and no outgoing edges (kiali-1326). 
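+// Nodes whose backing workload exists but currently has no pods are not removed; they are kept and
+// flagged with the "isDead" metadata instead (see applyDeadNodes below).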
+// Name: deadNode +type DeadNodeAppender struct{} + +// Name implements Appender +func (a DeadNodeAppender) Name() string { + return DeadNodeAppenderName +} + +// AppendGraph implements Appender +func (a DeadNodeAppender) AppendGraph(trafficMap graph.TrafficMap, globalInfo *GlobalInfo, namespaceInfo *NamespaceInfo) { + if len(trafficMap) == 0 { + return + } + + var err error + if globalInfo.Business == nil { + globalInfo.Business, err = business.Get() + graph.CheckError(err) + } + if namespaceInfo.WorkloadList == nil { + workloadList, err := globalInfo.Business.Workload.GetWorkloadList(namespaceInfo.Namespace) + graph.CheckError(err) + namespaceInfo.WorkloadList = &workloadList + } + + a.applyDeadNodes(trafficMap, globalInfo, namespaceInfo) +} + +func (a DeadNodeAppender) applyDeadNodes(trafficMap graph.TrafficMap, globalInfo *GlobalInfo, namespaceInfo *NamespaceInfo) { + numRemoved := 0 + for id, n := range trafficMap { + switch n.NodeType { + case graph.NodeTypeService: + // a service node with outgoing edges is never considered dead (or egress) + if len(n.Edges) > 0 { + continue + } + + // A service node that is a service entry is never considered dead + if _, ok := n.Metadata["isServiceEntry"]; ok { + continue + } + + // a service node with no incoming error traffic and no outgoing edges, is dead. + // Incoming non-error traffic can not raise the dead because it is caused by an + // edge case (pod life-cycle change) that we don't want to see. + isDead := true + ServiceCase: + for _, p := range graph.Protocols { + for _, r := range p.NodeRates { + if r.IsErr { + if errRate, hasErrRate := n.Metadata[r.Name]; hasErrRate && errRate.(float64) > 0 { + isDead = false + break ServiceCase + } + } + } + } + if isDead { + delete(trafficMap, id) + numRemoved++ + } + default: + // a node with traffic is not dead, skip + isDead := true + DefaultCase: + for _, p := range graph.Protocols { + for _, r := range p.NodeRates { + if r.IsIn || r.IsOut { + if rate, hasRate := n.Metadata[r.Name]; hasRate && rate.(float64) > 0 { + isDead = false + break DefaultCase + } + } + } + } + if !isDead { + continue + } + + // There are some node types that are never associated with backing workloads (such as versionless app nodes). + // Nodes of those types are never dead because their workload clearly can't be missing (they don't have workloads). + // - note: unknown is not saved by this rule (kiali-2078) - i.e. unknown nodes can be declared dead + if n.NodeType != graph.NodeTypeUnknown && !graph.IsOK(n.Workload) { + continue + } + + // Remove if backing workload is not defined (always true for "unknown"), flag if there are no pods + if workload, found := getWorkload(n.Workload, namespaceInfo.WorkloadList); !found { + delete(trafficMap, id) + numRemoved++ + } else { + if workload.PodCount == 0 { + n.Metadata["isDead"] = true + } + } + } + } + + // If we removed any nodes we need to remove any edges to them as well... 
+ if numRemoved == 0 { + return + } + + for _, s := range trafficMap { + goodEdges := []*graph.Edge{} + for _, e := range s.Edges { + if _, found := trafficMap[e.Dest.ID]; found { + goodEdges = append(goodEdges, e) + } + } + s.Edges = goodEdges + } +} diff --git a/vendor/github.com/kiali/kiali/graph/appender/istio_details.go b/vendor/github.com/kiali/kiali/graph/appender/istio_details.go new file mode 100644 index 000000000..7b9c86b54 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/appender/istio_details.go @@ -0,0 +1,142 @@ +package appender + +import ( + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/graph" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" +) + +const IstioAppenderName = "istio" + +// IstioAppender is responsible for badging nodes with special Istio significance: +// - CircuitBreaker: n.Metadata["hasCB"] = true +// - VirtualService: n.Metadata["hasVS"] = true +// Name: istio +type IstioAppender struct{} + +// Name implements Appender +func (a IstioAppender) Name() string { + return IstioAppenderName +} + +// AppendGraph implements Appender +func (a IstioAppender) AppendGraph(trafficMap graph.TrafficMap, globalInfo *GlobalInfo, namespaceInfo *NamespaceInfo) { + if len(trafficMap) == 0 { + return + } + + if globalInfo.Business == nil { + var err error + globalInfo.Business, err = business.Get() + graph.CheckError(err) + } + + addBadging(trafficMap, globalInfo, namespaceInfo) + addLabels(trafficMap, globalInfo) +} + +func addBadging(trafficMap graph.TrafficMap, globalInfo *GlobalInfo, namespaceInfo *NamespaceInfo) { + // Currently no other appenders use DestinationRules or VirtualServices, so they are not cached in NamespaceInfo + istioCfg, err := globalInfo.Business.IstioConfig.GetIstioConfigList(business.IstioConfigCriteria{ + IncludeDestinationRules: true, + IncludeVirtualServices: true, + Namespace: namespaceInfo.Namespace, + }) + graph.CheckError(err) + + applyCircuitBreakers(trafficMap, namespaceInfo.Namespace, istioCfg) + applyVirtualServices(trafficMap, namespaceInfo.Namespace, istioCfg) +} + +func applyCircuitBreakers(trafficMap graph.TrafficMap, namespace string, istioCfg models.IstioConfigList) { +NODES: + for _, n := range trafficMap { + // Skip the check if this node is outside the requested namespace, we limit badging to the requested namespaces + if n.Namespace != namespace { + continue + } + + // Note, Because DestinationRules are applied to services we limit CB badges to service nodes and app nodes. + // Whether we should add to workload nodes is debatable, we could add it later if needed. 
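+		// For example, with the checks below a versioned app node is badged only when one of its recorded
+		// destination services has a DestinationRule whose HasCircuitBreaker check matches that version.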
+		versionOk := graph.IsOK(n.Version)
+		switch {
+		case n.NodeType == graph.NodeTypeService:
+			for _, destinationRule := range istioCfg.DestinationRules.Items {
+				if destinationRule.HasCircuitBreaker(namespace, n.Service, "") {
+					n.Metadata["hasCB"] = true
+					continue NODES
+				}
+			}
+		case !versionOk && (n.NodeType == graph.NodeTypeApp):
+			if destServices, ok := n.Metadata["destServices"]; ok {
+				for serviceName := range destServices.(map[string]bool) {
+					for _, destinationRule := range istioCfg.DestinationRules.Items {
+						if destinationRule.HasCircuitBreaker(namespace, serviceName, "") {
+							n.Metadata["hasCB"] = true
+							continue NODES
+						}
+					}
+				}
+			}
+		case versionOk:
+			if destServices, ok := n.Metadata["destServices"]; ok {
+				for serviceName := range destServices.(map[string]bool) {
+					for _, destinationRule := range istioCfg.DestinationRules.Items {
+						if destinationRule.HasCircuitBreaker(namespace, serviceName, n.Version) {
+							n.Metadata["hasCB"] = true
+							continue NODES
+						}
+					}
+				}
+			}
+		default:
+			continue
+		}
+	}
+}
+
+func applyVirtualServices(trafficMap graph.TrafficMap, namespace string, istioCfg models.IstioConfigList) {
+NODES:
+	for _, n := range trafficMap {
+		if n.NodeType != graph.NodeTypeService {
+			continue
+		}
+		if n.Namespace != namespace {
+			continue
+		}
+		for _, virtualService := range istioCfg.VirtualServices.Items {
+			if virtualService.IsValidHost(namespace, n.Service) {
+				n.Metadata["hasVS"] = true
+				continue NODES
+			}
+		}
+	}
+}
+
+// addLabels is a chance to add any missing label info to nodes when the telemetry does not provide enough information.
+// TODO: For efficiency we may want to consider pulling all namespace service definitions in one call (the call does not
+// exist at this writing). As written we pull each service individually, which can be a fair number of round
+// trips when services are injected (as they are by default). Note also that currently we do query for
+// outsider service nodes. That may be a security problem if the outside namespace is inaccessible to the user. If
+// that becomes an issue we can limit to accessible namespaces or only to the NamespaceInfo namespace.
+func addLabels(trafficMap graph.TrafficMap, globalInfo *GlobalInfo) {
+	appLabelName := config.Get().IstioLabels.AppLabelName
+	for _, n := range trafficMap {
+		// make sure service nodes have the defined app label so it can be used for app grouping in the UI.
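+		// (the app label name is taken from config.IstioLabels.AppLabelName, which defaults to "app")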
+ if n.NodeType == graph.NodeTypeService && n.App == "" { + service, err := globalInfo.Business.Svc.GetServiceDefinition(n.Namespace, n.Service) + if err != nil { + log.Debugf("Error fetching service definition, may not apply app label correctly for namespace=%s svc=%s: %s", n.Namespace, n.Service, err.Error()) + if service == nil { + continue + } + } + + if app, ok := service.Service.Labels[appLabelName]; ok { + n.App = app + } + } + } +} diff --git a/vendor/github.com/kiali/kiali/graph/appender/response_time.go b/vendor/github.com/kiali/kiali/graph/appender/response_time.go new file mode 100644 index 000000000..f6180f9f9 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/appender/response_time.go @@ -0,0 +1,206 @@ +package appender + +import ( + "fmt" + "math" + "time" + + "github.com/prometheus/common/model" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/graph" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/prometheus" +) + +const ( + DefaultQuantile = 0.95 // 95th percentile + ResponseTimeAppenderName = "responseTime" +) + +// ResponseTimeAppender is responsible for adding responseTime information to the graph. ResponseTime +// is represented as a percentile value. The default is 95th percentile, which means that +// 95% of requests executed in no more than the resulting milliseconds. +// Name: responseTime +type ResponseTimeAppender struct { + GraphType string + InjectServiceNodes bool + IncludeIstio bool + Namespaces map[string]graph.NamespaceInfo + Quantile float64 + QueryTime int64 // unix time in seconds +} + +// Name implements Appender +func (a ResponseTimeAppender) Name() string { + return ResponseTimeAppenderName +} + +// AppendGraph implements Appender +func (a ResponseTimeAppender) AppendGraph(trafficMap graph.TrafficMap, globalInfo *GlobalInfo, namespaceInfo *NamespaceInfo) { + if len(trafficMap) == 0 { + return + } + + if globalInfo.PromClient == nil { + var err error + globalInfo.PromClient, err = prometheus.NewClient() + graph.CheckError(err) + } + + a.appendGraph(trafficMap, namespaceInfo.Namespace, globalInfo.PromClient) +} + +func (a ResponseTimeAppender) appendGraph(trafficMap graph.TrafficMap, namespace string, client *prometheus.Client) { + quantile := a.Quantile + if a.Quantile <= 0.0 || a.Quantile >= 100.0 { + log.Warningf("Replacing invalid quantile [%.2f] with default [%.2f]", a.Quantile, DefaultQuantile) + quantile = DefaultQuantile + } + log.Debugf("Generating responseTime using quantile [%.2f]; namespace = %v", quantile, namespace) + duration := a.Namespaces[namespace].Duration + + // create map to quickly look up responseTime + responseTimeMap := make(map[string]float64) + + // query prometheus for the responseTime info in three queries: + // 1) query for responseTime originating from "unknown" (i.e. 
the internet) + groupBy := "le,source_workload_namespace,source_workload,source_app,source_version,destination_service_namespace,destination_service_name,destination_workload,destination_app,destination_version" + query := fmt.Sprintf(`histogram_quantile(%.2f, sum(rate(%s{reporter="destination",source_workload="unknown",destination_service_namespace="%v",response_code=~"%s"}[%vs])) by (%s))`, + quantile, + "istio_request_duration_seconds_bucket", + namespace, + "2[0-9]{2}|^0$", // must match success for all expected protocols + int(duration.Seconds()), // range duration for the query + groupBy) + unkVector := promQuery(query, time.Unix(a.QueryTime, 0), client.API(), a) + a.populateResponseTimeMap(responseTimeMap, &unkVector) + + // 2) query for responseTime originating from a workload outside of the namespace. Exclude any "unknown" source telemetry (an unusual corner case) + query = fmt.Sprintf(`histogram_quantile(%.2f, sum(rate(%s{reporter="source",source_workload_namespace!="%v",source_workload!="unknown",destination_service_namespace="%v",response_code=~"%s"}[%vs])) by (%s))`, + quantile, + "istio_request_duration_seconds_bucket", + namespace, + namespace, + "2[0-9]{2}|^0$", // must match success for all expected protocols + int(duration.Seconds()), // range duration for the query + groupBy) + outVector := promQuery(query, time.Unix(a.QueryTime, 0), client.API(), a) + a.populateResponseTimeMap(responseTimeMap, &outVector) + + // 3) query for responseTime originating from a workload inside of the namespace + query = fmt.Sprintf(`histogram_quantile(%.2f, sum(rate(%s{reporter="source",source_workload_namespace="%v",response_code=~"%s"}[%vs])) by (%s))`, + quantile, + "istio_request_duration_seconds_bucket", + namespace, + "2[0-9]{2}|^0$", // must match success for all expected protocols + int(duration.Seconds()), // range duration for the query + groupBy) + inVector := promQuery(query, time.Unix(a.QueryTime, 0), client.API(), a) + a.populateResponseTimeMap(responseTimeMap, &inVector) + + // istio component telemetry is only reported destination-side, so we must perform additional queries + if a.IncludeIstio { + istioNamespace := config.Get().IstioNamespace + + // 4) if the target namespace is istioNamespace re-query for traffic originating from outside (other than unknown, covered in query #1) + if namespace == istioNamespace { + query = fmt.Sprintf(`histogram_quantile(%.2f, sum(rate(%s{reporter="destination",source_workload!="unknown",source_workload_namespace!="%v",destination_service_namespace="%v",response_code=~"%s"}[%vs])) by (%s))`, + quantile, + "istio_request_duration_seconds_bucket", + namespace, + namespace, + "2[0-9]{2}|^0$", // must match success for all expected protocols + int(duration.Seconds()), // range duration for the query + groupBy) + + // fetch the externally originating request traffic time-series + outIstioVector := promQuery(query, time.Unix(a.QueryTime, 0), client.API(), a) + a.populateResponseTimeMap(responseTimeMap, &outIstioVector) + } + + // 5) supplemental query for traffic originating from a workload inside of the namespace with istioSystem destination + query = fmt.Sprintf(`histogram_quantile(%.2f, sum(rate(%s{reporter="destination",source_workload_namespace="%v",destination_service_namespace="%v",response_code=~"%s"}[%vs])) by (%s))`, + quantile, + "istio_request_duration_seconds_bucket", + namespace, + istioNamespace, + "2[0-9]{2}|^0$", // must match success for all expected protocols + int(duration.Seconds()), // range duration for the query + groupBy) 
+ + // fetch the internally originating request traffic time-series + inIstioVector := promQuery(query, time.Unix(a.QueryTime, 0), client.API(), a) + a.populateResponseTimeMap(responseTimeMap, &inIstioVector) + } + + applyResponseTime(trafficMap, responseTimeMap) +} + +func applyResponseTime(trafficMap graph.TrafficMap, responseTimeMap map[string]float64) { + for _, n := range trafficMap { + for _, e := range n.Edges { + key := fmt.Sprintf("%s %s", e.Source.ID, e.Dest.ID) + e.Metadata["responseTime"] = responseTimeMap[key] + } + } +} + +func (a ResponseTimeAppender) populateResponseTimeMap(responseTimeMap map[string]float64, vector *model.Vector) { + for _, s := range *vector { + m := s.Metric + lSourceWlNs, sourceWlNsOk := m["source_workload_namespace"] + lSourceWl, sourceWlOk := m["source_workload"] + lSourceApp, sourceAppOk := m["source_app"] + lSourceVer, sourceVerOk := m["source_version"] + lDestSvcNs, destSvcNsOk := m["destination_service_namespace"] + lDestSvcName, destSvcNameOk := m["destination_service_name"] + lDestWl, destWlOk := m["destination_workload"] + lDestApp, destAppOk := m["destination_app"] + lDestVer, destVerOk := m["destination_version"] + if !sourceWlNsOk || !sourceWlOk || !sourceAppOk || !sourceVerOk || !destSvcNsOk || !destSvcNameOk || !destWlOk || !destAppOk || !destVerOk { + log.Warningf("Skipping %v, missing expected labels", m.String()) + continue + } + + sourceWlNs := string(lSourceWlNs) + sourceWl := string(lSourceWl) + sourceApp := string(lSourceApp) + sourceVer := string(lSourceVer) + destSvcNs := string(lDestSvcNs) + destSvcName := string(lDestSvcName) + destWl := string(lDestWl) + destApp := string(lDestApp) + destVer := string(lDestVer) + + // to best preserve precision convert from secs to millis now, otherwise the + // thousandths place is dropped downstream. + val := float64(s.Value) * 1000.0 + + // It is possible to get a NaN if there is no traffic (or possibly other reasons). Just skip it + if math.IsNaN(val) { + continue + } + + if a.InjectServiceNodes { + // don't inject a service node if the dest node is already a service node. Also, we can't inject if destSvcName is not set. 
+ _, destNodeType := graph.Id(destSvcNs, destWl, destApp, destVer, destSvcName, a.GraphType) + if destSvcNameOk && destNodeType != graph.NodeTypeService { + // Do not set response time on the incoming edge, we can't validly aggregate response times of the outgoing edges (kiali-2297) + a.addResponseTime(responseTimeMap, val, destSvcNs, "", "", "", destSvcName, destSvcNs, destWl, destApp, destVer, destSvcName) + } else { + a.addResponseTime(responseTimeMap, val, sourceWlNs, sourceWl, sourceApp, sourceVer, "", destSvcNs, destWl, destApp, destVer, destSvcName) + } + } else { + a.addResponseTime(responseTimeMap, val, sourceWlNs, sourceWl, sourceApp, sourceVer, "", destSvcNs, destWl, destApp, destVer, destSvcName) + } + } +} + +func (a ResponseTimeAppender) addResponseTime(responseTimeMap map[string]float64, val float64, sourceWlNs, sourceWl, sourceApp, sourceVer, sourceSvcName, destSvcNs, destWl, destApp, destVer, destSvcName string) { + sourceId, _ := graph.Id(sourceWlNs, sourceWl, sourceApp, sourceVer, sourceSvcName, a.GraphType) + destId, _ := graph.Id(destSvcNs, destWl, destApp, destVer, destSvcName, a.GraphType) + key := fmt.Sprintf("%s %s", sourceId, destId) + + responseTimeMap[key] = val +} diff --git a/vendor/github.com/kiali/kiali/graph/appender/security_policy.go b/vendor/github.com/kiali/kiali/graph/appender/security_policy.go new file mode 100644 index 000000000..81b8bd0e1 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/appender/security_policy.go @@ -0,0 +1,168 @@ +package appender + +import ( + "fmt" + "time" + + "github.com/prometheus/common/model" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/graph" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/prometheus" +) + +const ( + SecurityPolicyAppenderName = "securityPolicy" + policyMTLS = "mutual_tls" +) + +// SecurityPolicyAppender is responsible for adding securityPolicy information to the graph. +// The appender currently reports only mutual_tls security although is written in a generic way. 
+// Name: securityPolicy +type SecurityPolicyAppender struct { + GraphType string + IncludeIstio bool + InjectServiceNodes bool + Namespaces map[string]graph.NamespaceInfo + QueryTime int64 // unix time in seconds +} + +type PolicyRates map[string]float64 + +// Name implements Appender +func (a SecurityPolicyAppender) Name() string { + return SecurityPolicyAppenderName +} + +// AppendGraph implements Appender +func (a SecurityPolicyAppender) AppendGraph(trafficMap graph.TrafficMap, globalInfo *GlobalInfo, namespaceInfo *NamespaceInfo) { + if len(trafficMap) == 0 { + return + } + + if globalInfo.PromClient == nil { + var err error + globalInfo.PromClient, err = prometheus.NewClient() + graph.CheckError(err) + } + + a.appendGraph(trafficMap, namespaceInfo.Namespace, globalInfo.PromClient) +} + +func (a SecurityPolicyAppender) appendGraph(trafficMap graph.TrafficMap, namespace string, client *prometheus.Client) { + log.Debugf("Resolving security policy for namespace = %v", namespace) + duration := a.Namespaces[namespace].Duration + + // query prometheus for mutual_tls info in two queries (use dest telemetry because it reports the security policy): + // 1) query for requests originating from a workload outside the namespace + groupBy := "source_workload_namespace,source_workload,source_app,source_version,destination_service_namespace,destination_service_name,destination_workload,destination_app,destination_version,connection_security_policy" + query := fmt.Sprintf(`sum(rate(%s{reporter="destination",source_workload_namespace!="%v",destination_service_namespace="%v"}[%vs]) > 0) by (%s)`, + "istio_requests_total", + namespace, + namespace, + int(duration.Seconds()), // range duration for the query + groupBy) + outVector := promQuery(query, time.Unix(a.QueryTime, 0), client.API(), a) + + // 2) query for requests originating from a workload inside of the namespace + istioCondition := "" + if !a.IncludeIstio { + istioCondition = fmt.Sprintf(`,destination_service_namespace!="%s"`, config.Get().IstioNamespace) + } + query = fmt.Sprintf(`sum(rate(%s{reporter="destination",source_workload_namespace="%v"%s}[%vs]) > 0) by (%s)`, + "istio_requests_total", + namespace, + istioCondition, + int(duration.Seconds()), // range duration for the query + groupBy) + inVector := promQuery(query, time.Unix(a.QueryTime, 0), client.API(), a) + + // create map to quickly look up securityPolicy + securityPolicyMap := make(map[string]PolicyRates) + a.populateSecurityPolicyMap(securityPolicyMap, &outVector) + a.populateSecurityPolicyMap(securityPolicyMap, &inVector) + + applySecurityPolicy(trafficMap, securityPolicyMap) +} + +func (a SecurityPolicyAppender) populateSecurityPolicyMap(securityPolicyMap map[string]PolicyRates, vector *model.Vector) { + for _, s := range *vector { + m := s.Metric + lSourceWlNs, sourceWlNsOk := m["source_workload_namespace"] + lSourceWl, sourceWlOk := m["source_workload"] + lSourceApp, sourceAppOk := m["source_app"] + lSourceVer, sourceVerOk := m["source_version"] + lDestSvcNs, destSvcNsOk := m["destination_service_namespace"] + lDestSvcName, destSvcNameOk := m["destination_service_name"] + lDestWl, destWlOk := m["destination_workload"] + lDestApp, destAppOk := m["destination_app"] + lDestVer, destVerOk := m["destination_version"] + lCsp, cspOk := m["connection_security_policy"] + if !sourceWlNsOk || !sourceWlOk || !sourceAppOk || !sourceVerOk || !destSvcNsOk || !destSvcNameOk || !destWlOk || !destAppOk || !destVerOk || !cspOk { + log.Warningf("Skipping %v, missing expected labels", m.String()) 
+ continue + } + + sourceWlNs := string(lSourceWlNs) + sourceWl := string(lSourceWl) + sourceApp := string(lSourceApp) + sourceVer := string(lSourceVer) + destSvcNs := string(lDestSvcNs) + destSvcName := string(lDestSvcName) + destWl := string(lDestWl) + destApp := string(lDestApp) + destVer := string(lDestVer) + csp := string(lCsp) + + val := float64(s.Value) + + if a.InjectServiceNodes { + // don't inject a service node if the dest node is already a service node. Also, we can't inject if destSvcName is not set. + _, destNodeType := graph.Id(destSvcNs, destWl, destApp, destVer, destSvcName, a.GraphType) + if destSvcNameOk && destNodeType != graph.NodeTypeService { + a.addSecurityPolicy(securityPolicyMap, csp, val, sourceWlNs, sourceWl, sourceApp, sourceVer, "", destSvcNs, "", "", "", destSvcName) + a.addSecurityPolicy(securityPolicyMap, csp, val, destSvcNs, "", "", "", destSvcName, destSvcNs, destWl, destApp, destVer, destSvcName) + } else { + a.addSecurityPolicy(securityPolicyMap, csp, val, sourceWlNs, sourceWl, sourceApp, sourceVer, "", destSvcNs, destWl, destApp, destVer, destSvcName) + } + } else { + a.addSecurityPolicy(securityPolicyMap, csp, val, sourceWlNs, sourceWl, sourceApp, sourceVer, "", destSvcNs, destWl, destApp, destVer, destSvcName) + } + } +} + +func (a SecurityPolicyAppender) addSecurityPolicy(securityPolicyMap map[string]PolicyRates, csp string, val float64, sourceWlNs, sourceWl, sourceApp, sourceVer, sourceSvcName, destSvcNs, destWl, destApp, destVer, destSvcName string) { + sourceId, _ := graph.Id(sourceWlNs, sourceWl, sourceApp, sourceVer, sourceSvcName, a.GraphType) + destId, _ := graph.Id(destSvcNs, destWl, destApp, destVer, destSvcName, a.GraphType) + key := fmt.Sprintf("%s %s", sourceId, destId) + var policyRates PolicyRates + var ok bool + if policyRates, ok = securityPolicyMap[key]; !ok { + policyRates = make(PolicyRates) + securityPolicyMap[key] = policyRates + } + policyRates[csp] = val +} + +func applySecurityPolicy(trafficMap graph.TrafficMap, securityPolicyMap map[string]PolicyRates) { + for _, s := range trafficMap { + for _, e := range s.Edges { + key := fmt.Sprintf("%s %s", e.Source.ID, e.Dest.ID) + if policyRates, ok := securityPolicyMap[key]; ok { + mtls := 0.0 + other := 0.0 + for policy, rate := range policyRates { + if policy == policyMTLS { + mtls = rate + } else { + other += rate + } + } + if percentMtls := mtls / (mtls + other) * 100; percentMtls > 0 { + e.Metadata["isMTLS"] = percentMtls + } + } + } + } +} diff --git a/vendor/github.com/kiali/kiali/graph/appender/service_entry.go b/vendor/github.com/kiali/kiali/graph/appender/service_entry.go new file mode 100644 index 000000000..0b29e9bc9 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/appender/service_entry.go @@ -0,0 +1,92 @@ +package appender + +import ( + "time" + + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/graph" + "github.com/kiali/kiali/log" +) + +const ServiceEntryAppenderName = "serviceEntry" + +// ServiceEntryAppender is responsible for identifying service nodes that are +// Istio Service Entries. 
+// Name: serviceEntry +type ServiceEntryAppender struct { + AccessibleNamespaces map[string]time.Time +} + +// Name implements Appender +func (a ServiceEntryAppender) Name() string { + return ServiceEntryAppenderName +} + +// AppendGraph implements Appender +func (a ServiceEntryAppender) AppendGraph(trafficMap graph.TrafficMap, globalInfo *GlobalInfo, namespaceInfo *NamespaceInfo) { + if len(trafficMap) == 0 { + return + } + + var err error + if globalInfo.Business == nil { + globalInfo.Business, err = business.Get() + graph.CheckError(err) + } + + a.applyServiceEntries(trafficMap, globalInfo, namespaceInfo) +} + +func (a ServiceEntryAppender) applyServiceEntries(trafficMap graph.TrafficMap, globalInfo *GlobalInfo, namespaceInfo *NamespaceInfo) { + for _, n := range trafficMap { + // only a service node can be a service entry + if n.NodeType != graph.NodeTypeService { + continue + } + // only a terminal node can be a service entry (no outgoing edges because the service is performed outside the mesh) + if len(n.Edges) > 0 { + continue + } + + // A service node with no outgoing edges may be an egress. + // If so flag it, don't discard it (kiali-1526, see also kiali-2014). + // The flag will be passed to the UI to inhibit links to non-existent detail pages. + if location, ok := a.getServiceEntry(n.Service, globalInfo); ok { + n.Metadata["isServiceEntry"] = location + } + } +} + +// getServiceEntry queries the cluster API to resolve service entries +// across all accessible namespaces in the cluster. All ServiceEntries are needed because +// Istio does not distinguish where a ServiceEntry is created when routing traffic (i.e. +// a ServiceEntry can be in any namespace and it will still work). +func (a ServiceEntryAppender) getServiceEntry(service string, globalInfo *GlobalInfo) (string, bool) { + if globalInfo.ServiceEntries == nil { + globalInfo.ServiceEntries = make(map[string]string) + + for ns := range a.AccessibleNamespaces { + istioCfg, err := globalInfo.Business.IstioConfig.GetIstioConfigList(business.IstioConfigCriteria{ + IncludeServiceEntries: true, + Namespace: ns, + }) + graph.CheckError(err) + + for _, entry := range istioCfg.ServiceEntries { + if entry.Spec.Hosts != nil { + location := "MESH_EXTERNAL" + if entry.Spec.Location == "MESH_INTERNAL" { + location = "MESH_INTERNAL" + } + for _, host := range entry.Spec.Hosts.([]interface{}) { + globalInfo.ServiceEntries[host.(string)] = location + } + } + } + } + log.Tracef("Found [%v] service entries", len(globalInfo.ServiceEntries)) + } + + location, ok := globalInfo.ServiceEntries[service] + return location, ok +} diff --git a/vendor/github.com/kiali/kiali/graph/appender/sidecars_check.go b/vendor/github.com/kiali/kiali/graph/appender/sidecars_check.go new file mode 100644 index 000000000..70256f776 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/appender/sidecars_check.go @@ -0,0 +1,89 @@ +package appender + +import ( + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/graph" +) + +const SidecarsCheckAppenderName = "sidecarsCheck" + +// SidecarsCheckAppender flags nodes whose backing workloads are missing at least one Envoy sidecar. Note that +// a node with no backing workloads is not flagged. 
+// Name: sidecarsCheck +type SidecarsCheckAppender struct{} + +// Name implements Appender +func (a SidecarsCheckAppender) Name() string { + return SidecarsCheckAppenderName +} + +// AppendGraph implements Appender +func (a SidecarsCheckAppender) AppendGraph(trafficMap graph.TrafficMap, globalInfo *GlobalInfo, namespaceInfo *NamespaceInfo) { + if len(trafficMap) == 0 { + return + } + + if globalInfo.Business == nil { + var err error + globalInfo.Business, err = business.Get() + graph.CheckError(err) + } + if namespaceInfo.WorkloadList == nil { + workloadList, err := globalInfo.Business.Workload.GetWorkloadList(namespaceInfo.Namespace) + graph.CheckError(err) + namespaceInfo.WorkloadList = &workloadList + } + + a.applySidecarsChecks(trafficMap, namespaceInfo) +} + +func (a *SidecarsCheckAppender) applySidecarsChecks(trafficMap graph.TrafficMap, namespaceInfo *NamespaceInfo) { + cfg := config.Get() + istioNamespace := cfg.IstioNamespace + workloadList := namespaceInfo.WorkloadList + + for _, n := range trafficMap { + // Skip the check if this node is outside the requested namespace, we limit badging to the requested namespaces + if n.Namespace != namespaceInfo.Namespace { + continue + } + + // We whitelist istio components because they may not report telemetry using injected sidecars. + if n.Namespace == istioNamespace { + continue + } + + // dead nodes tell no tales (er, have no pods) + if isDead, ok := n.Metadata["isDead"]; ok && isDead.(bool) { + continue + } + + // get the workloads for the node and check to see if they have sidecars. Note that + // if there are no workloads/pods we don't flag it as missing sidecars. No pods means + // no missing sidecars. (In most cases this means it was flagged as dead, and handled above) + hasIstioSidecar := true + switch n.NodeType { + case graph.NodeTypeWorkload: + if workload, found := getWorkload(n.Workload, workloadList); found { + hasIstioSidecar = workload.IstioSidecar + } + case graph.NodeTypeApp: + workloads := getAppWorkloads(n.App, n.Version, workloadList) + if len(workloads) > 0 { + for _, workload := range workloads { + if !workload.IstioSidecar { + hasIstioSidecar = false + break + } + } + } + default: + continue + } + + if !hasIstioSidecar { + n.Metadata["hasMissingSC"] = true + } + } +} diff --git a/vendor/github.com/kiali/kiali/graph/appender/unused_node.go b/vendor/github.com/kiali/kiali/graph/appender/unused_node.go new file mode 100644 index 000000000..c13d1ff2f --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/appender/unused_node.go @@ -0,0 +1,116 @@ +package appender + +import ( + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/graph" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" +) + +const UnusedNodeAppenderName = "unusedNode" + +// UnusedNodeAppender looks for services that have never seen request traffic. It adds nodes to represent the +// unused definitions. The added node types depend on the graph type and/or labeling on the definition. +// Name: unusedNode +type UnusedNodeAppender struct { + GraphType string // This appender does not operate on service graphs because it adds workload nodes. + IsNodeGraph bool // This appender does not operate on node detail graphs because we want to focus on the specific node. 
+} + +// Name implements Appender +func (a UnusedNodeAppender) Name() string { + return UnusedNodeAppenderName +} + +// AppendGraph implements Appender +func (a UnusedNodeAppender) AppendGraph(trafficMap graph.TrafficMap, globalInfo *GlobalInfo, namespaceInfo *NamespaceInfo) { + if graph.GraphTypeService == a.GraphType || a.IsNodeGraph { + return + } + + if globalInfo.Business == nil { + var err error + globalInfo.Business, err = business.Get() + graph.CheckError(err) + } + if namespaceInfo.WorkloadList == nil { + workloadList, err := globalInfo.Business.Workload.GetWorkloadList(namespaceInfo.Namespace) + graph.CheckError(err) + namespaceInfo.WorkloadList = &workloadList + } + + a.addUnusedNodes(trafficMap, namespaceInfo.Namespace, namespaceInfo.WorkloadList.Workloads) +} + +func (a UnusedNodeAppender) addUnusedNodes(trafficMap graph.TrafficMap, namespace string, workloads []models.WorkloadListItem) { + unusedTrafficMap := a.buildUnusedTrafficMap(trafficMap, namespace, workloads) + + // If trafficMap is empty just populate it with the unused nodes and return + if len(trafficMap) == 0 { + for k, v := range unusedTrafficMap { + trafficMap[k] = v + } + return + } + + // Integrate the unused nodes into the existing traffic map + for _, v := range unusedTrafficMap { + addUnusedNodeToTrafficMap(trafficMap, v) + } +} + +func (a UnusedNodeAppender) buildUnusedTrafficMap(trafficMap graph.TrafficMap, namespace string, workloads []models.WorkloadListItem) graph.TrafficMap { + unusedTrafficMap := graph.NewTrafficMap() + cfg := config.Get() + appLabel := cfg.IstioLabels.AppLabelName + versionLabel := cfg.IstioLabels.VersionLabelName + for _, w := range workloads { + labels := w.Labels + app := graph.Unknown + version := graph.Unknown + if v, ok := labels[appLabel]; ok { + app = v + } + if v, ok := labels[versionLabel]; ok { + version = v + } + id, nodeType := graph.Id(namespace, w.Name, app, version, "", a.GraphType) + if _, found := trafficMap[id]; !found { + if _, found = unusedTrafficMap[id]; !found { + log.Debugf("Adding unused node for workload [%s] with labels [%v]", w.Name, labels) + node := graph.NewNodeExplicit(id, namespace, w.Name, app, version, "", nodeType, a.GraphType) + // note: we don't know what the protocol really should be, http is most common, it's a dead edge anyway + node.Metadata = map[string]interface{}{"httpIn": 0.0, "httpOut": 0.0, "isUnused": true} + unusedTrafficMap[id] = &node + } + } + } + return unusedTrafficMap +} + +func addUnusedNodeToTrafficMap(trafficMap graph.TrafficMap, unusedNode *graph.Node) { + // add unused node to traffic map + trafficMap[unusedNode.ID] = unusedNode + + // Add a "sibling" edge to any node with an edge to the same app + for _, n := range trafficMap { + findAndAddSibling(n, unusedNode) + } +} + +func findAndAddSibling(parent, unusedNode *graph.Node) { + if unusedNode.App == graph.Unknown { + return + } + + found := false + for _, edge := range parent.Edges { + if found = edge.Dest.App == unusedNode.App; found { + break + } + } + if found { + parent.AddEdge(unusedNode) + } +} diff --git a/vendor/github.com/kiali/kiali/graph/appender/util.go b/vendor/github.com/kiali/kiali/graph/appender/util.go new file mode 100644 index 000000000..6b38c13fa --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/appender/util.go @@ -0,0 +1,38 @@ +package appender + +import ( + "context" + "fmt" + "time" + + "github.com/kiali/kiali/graph" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/prometheus/internalmetrics" + 
"github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/model" +) + +// package-private util functions (used by multiple files) + +func promQuery(query string, queryTime time.Time, api v1.API, a Appender) model.Vector { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // wrap with a round() to be in line with metrics api + query = fmt.Sprintf("round(%s,0.001)", query) + log.Debugf("Appender query:\n%s&time=%v (now=%v, %v)\n", query, queryTime.Format(graph.TF), time.Now().Format(graph.TF), queryTime.Unix()) + + promtimer := internalmetrics.GetPrometheusProcessingTimePrometheusTimer("Graph-Appender-" + a.Name()) + value, err := api.Query(ctx, query, queryTime) + graph.CheckError(err) + promtimer.ObserveDuration() // notice we only collect metrics for successful prom queries + + switch t := value.Type(); t { + case model.ValVector: // Instant Vector + return value.(model.Vector) + default: + graph.Error(fmt.Sprintf("No handling for type %v!\n", t)) + } + + return nil +} diff --git a/vendor/github.com/kiali/kiali/graph/cytoscape/cytoscape.go b/vendor/github.com/kiali/kiali/graph/cytoscape/cytoscape.go new file mode 100644 index 000000000..a6adbabf8 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/cytoscape/cytoscape.go @@ -0,0 +1,417 @@ +// Cytoscape package provides conversion from our graph to the CystoscapeJS +// configuration json model. +// +// The following links are useful for understanding CytoscapeJS and it's configuration: +// +// Main page: http://js.cytoscape.org/ +// JSON config: http://js.cytoscape.org/#notation/elements-json +// Demos: http://js.cytoscape.org/#demos +// +// Algorithm: Process the graph structure adding nodes and edges, decorating each +// with information provided. An optional second pass generates compound +// nodes for version grouping. +// +package cytoscape + +import ( + "crypto/md5" + "fmt" + "sort" + + "github.com/kiali/kiali/graph" + "github.com/kiali/kiali/graph/options" +) + +type ProtocolTraffic struct { + Protocol string `json:"protocol"` // protocol + Rates map[string]string `json:"rates"` // map[rate]value +} + +type NodeData struct { + // Cytoscape Fields + Id string `json:"id"` // unique internal node ID (n0, n1...) 
+ Parent string `json:"parent,omitempty"` // Compound Node parent ID + + // App Fields (not required by Cytoscape) + NodeType string `json:"nodeType"` + Namespace string `json:"namespace"` + Workload string `json:"workload,omitempty"` + App string `json:"app,omitempty"` + Version string `json:"version,omitempty"` + Service string `json:"service,omitempty"` // requested service for NodeTypeService + DestServices map[string]bool `json:"destServices,omitempty"` // requested services for [dest] node + Traffic []ProtocolTraffic `json:"traffic,omitempty"` // traffic rates for all detected protocols + HasCB bool `json:"hasCB,omitempty"` // true (has circuit breaker) | false + HasMissingSC bool `json:"hasMissingSC,omitempty"` // true (has missing sidecar) | false + HasVS bool `json:"hasVS,omitempty"` // true (has route rule) | false + IsDead bool `json:"isDead,omitempty"` // true (has no pods) | false + IsGroup string `json:"isGroup,omitempty"` // set to the grouping type, current values: [ 'app', 'version' ] + IsInaccessible bool `json:"isInaccessible,omitempty"` // true if the node exists in an inaccessible namespace + IsMisconfigured string `json:"isMisconfigured,omitempty"` // set to misconfiguration list, current values: [ 'labels' ] + IsOutside bool `json:"isOutside,omitempty"` // true | false + IsRoot bool `json:"isRoot,omitempty"` // true | false + IsServiceEntry string `json:"isServiceEntry,omitempty"` // set to the location, current values: [ 'MESH_EXTERNAL', 'MESH_INTERNAL' ] + IsUnused bool `json:"isUnused,omitempty"` // true | false +} + +type EdgeData struct { + // Cytoscape Fields + Id string `json:"id"` // unique internal edge ID (e0, e1...) + Source string `json:"source"` // parent node ID + Target string `json:"target"` // child node ID + + // App Fields (not required by Cytoscape) + Traffic ProtocolTraffic `json:"traffic,omitempty"` // traffic rates for the edge protocol + ResponseTime string `json:"responseTime,omitempty"` // in millis + IsMTLS string `json:"isMTLS,omitempty"` // set to the percentage of traffic using a mutual TLS connection + IsUnused bool `json:"isUnused,omitempty"` // true | false +} + +type NodeWrapper struct { + Data *NodeData `json:"data"` +} + +type EdgeWrapper struct { + Data *EdgeData `json:"data"` +} + +type Elements struct { + Nodes []*NodeWrapper `json:"nodes"` + Edges []*EdgeWrapper `json:"edges"` +} + +type Config struct { + Timestamp int64 `json:"timestamp"` + Duration int64 `json:"duration"` + GraphType string `json:"graphType"` + Elements Elements `json:"elements"` +} + +func nodeHash(id string) string { + return fmt.Sprintf("%x", md5.Sum([]byte(id))) +} + +func edgeHash(from, to, protocol string) string { + return fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s.%s.%s", from, to, protocol)))) +} + +func NewConfig(trafficMap graph.TrafficMap, o options.VendorOptions) (result Config) { + nodes := []*NodeWrapper{} + edges := []*EdgeWrapper{} + + buildConfig(trafficMap, &nodes, &edges, o) + + // Add compound nodes as needed + switch o.GroupBy { + case options.GroupByApp: + if o.GraphType != graph.GraphTypeService { + groupByApp(&nodes) + } + case options.GroupByVersion: + if o.GraphType == graph.GraphTypeVersionedApp { + groupByVersion(&nodes) + } + default: + // no grouping + } + + // sort nodes and edges for better json presentation (and predictable testing) + // kiali-1258 compound/isGroup/parent nodes must come before the child references + sort.Slice(nodes, func(i, j int) bool { + switch { + case nodes[i].Data.Namespace != 
nodes[j].Data.Namespace: + return nodes[i].Data.Namespace < nodes[j].Data.Namespace + case nodes[i].Data.IsGroup != nodes[j].Data.IsGroup: + return nodes[i].Data.IsGroup > nodes[j].Data.IsGroup + case nodes[i].Data.App != nodes[j].Data.App: + return nodes[i].Data.App < nodes[j].Data.App + case nodes[i].Data.Version != nodes[j].Data.Version: + return nodes[i].Data.Version < nodes[j].Data.Version + case nodes[i].Data.Service != nodes[j].Data.Service: + return nodes[i].Data.Service < nodes[j].Data.Service + default: + return nodes[i].Data.Workload < nodes[j].Data.Workload + } + }) + sort.Slice(edges, func(i, j int) bool { + switch { + case edges[i].Data.Source < edges[j].Data.Source: + return true + case edges[i].Data.Source > edges[j].Data.Source: + return false + default: + return edges[i].Data.Target < edges[j].Data.Target + } + }) + + elements := Elements{nodes, edges} + result = Config{ + Duration: int64(o.Duration.Seconds()), + Timestamp: o.QueryTime, + GraphType: o.GraphType, + Elements: elements, + } + return result +} + +func buildConfig(trafficMap graph.TrafficMap, nodes *[]*NodeWrapper, edges *[]*EdgeWrapper, o options.VendorOptions) { + for id, n := range trafficMap { + nodeId := nodeHash(id) + + nd := &NodeData{ + Id: nodeId, + NodeType: n.NodeType, + Namespace: n.Namespace, + Workload: n.Workload, + App: n.App, + Version: n.Version, + Service: n.Service, + } + + addNodeTelemetry(n, nd) + + // node may have deployment but no pods running) + if val, ok := n.Metadata["isDead"]; ok { + nd.IsDead = val.(bool) + } + + // node may be a root + if val, ok := n.Metadata["isRoot"]; ok { + nd.IsRoot = val.(bool) + } + + // node may be unused + if val, ok := n.Metadata["isUnused"]; ok { + nd.IsUnused = val.(bool) + } + + // node is not accessible to the current user + if val, ok := n.Metadata["isInaccessible"]; ok { + nd.IsInaccessible = val.(bool) + } + + // node may have a circuit breaker + if val, ok := n.Metadata["hasCB"]; ok { + nd.HasCB = val.(bool) + } + + // node may have a virtual service + if val, ok := n.Metadata["hasVS"]; ok { + nd.HasVS = val.(bool) + } + + // set sidecars checks, if available + if val, ok := n.Metadata["hasMissingSC"]; ok { + nd.HasMissingSC = val.(bool) + } + + // check if node is misconfigured + if val, ok := n.Metadata["isMisconfigured"]; ok { + nd.IsMisconfigured = val.(string) + } + + // check if node is on another namespace + if val, ok := n.Metadata["isOutside"]; ok { + nd.IsOutside = val.(bool) + } + + // node may have destination service info + if val, ok := n.Metadata["destServices"]; ok { + nd.DestServices = val.(map[string]bool) + } + + // node may be a service entry + if val, ok := n.Metadata["isServiceEntry"]; ok { + nd.IsServiceEntry = val.(string) + } + + nw := NodeWrapper{ + Data: nd, + } + + *nodes = append(*nodes, &nw) + + for _, e := range n.Edges { + sourceIdHash := nodeHash(n.ID) + destIdHash := nodeHash(e.Dest.ID) + protocol := "" + if e.Metadata["protocol"] != nil { + protocol = e.Metadata["protocol"].(string) + } + edgeId := edgeHash(sourceIdHash, destIdHash, protocol) + ed := EdgeData{ + Id: edgeId, + Source: sourceIdHash, + Target: destIdHash, + } + addEdgeTelemetry(e, &ed) + + ew := EdgeWrapper{ + Data: &ed, + } + *edges = append(*edges, &ew) + } + } +} + +func addNodeTelemetry(n *graph.Node, nd *NodeData) { + nd.Traffic = []ProtocolTraffic{} + for _, p := range graph.Protocols { + protocolTraffic := ProtocolTraffic{Protocol: p.Name} + for _, r := range p.NodeRates { + if rateVal := getRate(n.Metadata, r.Name); rateVal > 0.0 { + if 
protocolTraffic.Rates == nil { + protocolTraffic.Rates = make(map[string]string) + } + protocolTraffic.Rates[r.Name] = fmt.Sprintf("%.*f", r.Precision, rateVal) + } + } + if protocolTraffic.Rates != nil { + nd.Traffic = append(nd.Traffic, protocolTraffic) + } + } +} + +func addEdgeTelemetry(e *graph.Edge, ed *EdgeData) { + if val, ok := e.Metadata["isMTLS"]; ok { + ed.IsMTLS = fmt.Sprintf("%.0f", val.(float64)) + } + if val, ok := e.Metadata["responseTime"]; ok { + responseTime := val.(float64) + ed.ResponseTime = fmt.Sprintf("%.0f", responseTime) + } + if val, ok := e.Source.Metadata["isUnused"]; ok { + ed.IsUnused = val.(bool) + } + + // an edge represents traffic for at most one protocol + ed.Traffic = ProtocolTraffic{} + for _, p := range graph.Protocols { + protocolTraffic := ProtocolTraffic{Protocol: p.Name} + total := 0.0 + err := 0.0 + var percentErr, percentReq graph.Rate + for _, r := range p.EdgeRates { + rateVal := getRate(e.Metadata, r.Name) + switch { + case r.IsTotal: + // there is one field holding the total traffic + total = rateVal + case r.IsErr: + // error rates can be reported for several error status codes, so sum up all + // of the error traffic to be used in the percentErr calculation below. + err += rateVal + case r.IsPercentErr: + // hold onto the percentErr field so we know how to report it below + percentErr = r + case r.IsPercentReq: + // hold onto the percentReq field so we know how to report it below + percentReq = r + } + if rateVal := getRate(e.Metadata, r.Name); rateVal > 0.0 { + if protocolTraffic.Rates == nil { + protocolTraffic.Rates = make(map[string]string) + } + protocolTraffic.Rates[r.Name] = fmt.Sprintf("%.*f", r.Precision, rateVal) + } + } + if protocolTraffic.Rates != nil { + if total > 0 { + if percentErr.Name != "" { + rateVal := err / total * 100 + if rateVal > 0.0 { + protocolTraffic.Rates[percentErr.Name] = fmt.Sprintf("%.*f", percentErr.Precision, rateVal) + } + } + if percentReq.Name != "" { + rateVal := 0.0 + for _, r := range p.NodeRates { + if !r.IsOut { + continue + } + rateVal = total / getRate(e.Source.Metadata, r.Name) * 100.0 + break + } + if rateVal > 0.0 { + protocolTraffic.Rates[percentReq.Name] = fmt.Sprintf("%.*f", percentReq.Precision, rateVal) + } + } + } + ed.Traffic = protocolTraffic + break + } + } +} + +func getRate(md map[string]interface{}, k string) float64 { + if rate, ok := md[k]; ok { + return rate.(float64) + } + return 0.0 +} + +// groupByVersion adds compound nodes to group multiple versions of the same app +func groupByVersion(nodes *[]*NodeWrapper) { + appBox := make(map[string][]*NodeData) + + for _, nw := range *nodes { + if nw.Data.NodeType == graph.NodeTypeApp { + k := fmt.Sprintf("box_%s_%s", nw.Data.Namespace, nw.Data.App) + appBox[k] = append(appBox[k], nw.Data) + } + } + + generateGroupCompoundNodes(appBox, nodes, options.GroupByVersion) +} + +// groupByApp adds compound nodes to group all nodes for the same app +func groupByApp(nodes *[]*NodeWrapper) { + appBox := make(map[string][]*NodeData) + + for _, nw := range *nodes { + if nw.Data.App != "unknown" && nw.Data.App != "" { + k := fmt.Sprintf("box_%s_%s", nw.Data.Namespace, nw.Data.App) + appBox[k] = append(appBox[k], nw.Data) + } + } + + generateGroupCompoundNodes(appBox, nodes, options.GroupByApp) +} + +func generateGroupCompoundNodes(appBox map[string][]*NodeData, nodes *[]*NodeWrapper, groupBy string) { + for k, members := range appBox { + if len(members) > 1 { + // create the compound (parent) node for the member nodes + nodeId := nodeHash(k) + 
nd := NodeData{ + Id: nodeId, + NodeType: graph.NodeTypeApp, + Namespace: members[0].Namespace, + App: members[0].App, + Version: "", + IsGroup: groupBy, + } + + nw := NodeWrapper{ + Data: &nd, + } + + // assign each member node to the compound parent + nd.HasMissingSC = false // TODO: this is probably unecessarily noisy + nd.IsInaccessible = false + nd.IsOutside = false + + for _, n := range members { + n.Parent = nodeId + + // copy some member attributes to to the compound node (aka app box) + nd.HasMissingSC = nd.HasMissingSC || n.HasMissingSC + nd.IsInaccessible = nd.IsInaccessible || n.IsInaccessible + nd.IsOutside = nd.IsOutside || n.IsOutside + } + + // add the compound node to the list of nodes + *nodes = append(*nodes, &nw) + } + } +} diff --git a/vendor/github.com/kiali/kiali/graph/graph.go b/vendor/github.com/kiali/kiali/graph/graph.go new file mode 100644 index 000000000..747bd3315 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/graph.go @@ -0,0 +1,179 @@ +// Graph package provides support for the graph handlers such as supported path +// variables and query params, as well as types for graph processing. +package graph + +import ( + "fmt" + "time" +) + +const ( + GraphTypeApp string = "app" + GraphTypeService string = "service" // Treated as graphType Workload, with service injection, and then condensed + GraphTypeVersionedApp string = "versionedApp" + GraphTypeWorkload string = "workload" + NodeTypeApp string = "app" + NodeTypeService string = "service" + NodeTypeUnknown string = "unknown" // The special "unknown" traffic gen node + NodeTypeWorkload string = "workload" + TF string = "2006-01-02 15:04:05" // TF is the TimeFormat for timestamps + Unknown string = "unknown" // Istio unknown label value +) + +type Node struct { + ID string // unique identifier for the node + NodeType string // Node type + Namespace string // Namespace + Workload string // Workload (deployment) name + App string // Workload app label value + Version string // Workload version label value + Service string // Service name + Edges []*Edge // child nodes + Metadata map[string]interface{} // app-specific data +} + +type Edge struct { + Source *Node + Dest *Node + Metadata map[string]interface{} // app-specific data +} + +type NamespaceInfo struct { + Name string + Duration time.Duration +} + +// TrafficMap is a map of app Nodes, each optionally holding Edge data. Metadata +// is a general purpose map for holding any desired node or edge information. +// Each app node should have a unique namespace+workload. Note that it is feasible +// but likely unusual to have two nodes with the same name+version in the same +// namespace. 
+type TrafficMap map[string]*Node + +func NewNode(namespace, workload, app, version, service, graphType string) Node { + id, nodeType := Id(namespace, workload, app, version, service, graphType) + + return NewNodeExplicit(id, namespace, workload, app, version, service, nodeType, graphType) +} + +func NewNodeExplicit(id, namespace, workload, app, version, service, nodeType, graphType string) Node { + // trim unnecessary fields + switch nodeType { + case NodeTypeWorkload: + // maintain the app+version labeling if it is set, it can be useful + // for identifying destination rules, providing links, and grouping + if app == Unknown { + app = "" + } + if version == Unknown { + version = "" + } + service = "" + case NodeTypeApp: + // note: we keep workload for a versioned app node because app+version labeling + // should be backed by a single workload and it can be useful to use the workload + // name as opposed to the label values. + if graphType != GraphTypeVersionedApp { + workload = "" + version = "" + } + service = "" + case NodeTypeService: + app = "" + workload = "" + version = "" + } + + return Node{ + ID: id, + NodeType: nodeType, + Namespace: namespace, + Workload: workload, + App: app, + Version: version, + Service: service, + Edges: []*Edge{}, + Metadata: make(map[string]interface{}), + } +} + +func (s *Node) AddEdge(dest *Node) *Edge { + e := NewEdge(s, dest) + s.Edges = append(s.Edges, &e) + return &e +} + +func NewEdge(source, dest *Node) Edge { + return Edge{ + Source: source, + Dest: dest, + Metadata: make(map[string]interface{}), + } +} + +func NewTrafficMap() TrafficMap { + return make(map[string]*Node) +} + +func Id(namespace, workload, app, version, service, graphType string) (id, nodeType string) { + // first, check for the special-case "unknown" source node + if Unknown == namespace && Unknown == workload && Unknown == app && "" == service { + return fmt.Sprintf("unknown_source"), NodeTypeUnknown + } + + // It is possible that a request is made for an unknown destination. For example, an Ingress + // request to an unknown path. In this case the namespace may or may not be unknown. + // Every other field is unknown. Allow one unknown service per namespace to help reflect these + // bad destinations in the graph, it may help diagnose a problem. + if Unknown == workload && Unknown == app && Unknown == service { + return fmt.Sprintf("svc_%s_unknown", namespace), NodeTypeService + } + + workloadOk := IsOK(workload) + appOk := IsOK(app) + serviceOk := IsOK(service) + + if !workloadOk && !appOk && !serviceOk { + panic(fmt.Sprintf("Failed ID gen: namespace=[%s] workload=[%s] app=[%s] version=[%s] service=[%s] graphType=[%s]", namespace, workload, app, version, service, graphType)) + } + + // handle workload graph nodes (service graphs are initially processed as workload graphs) + if graphType == GraphTypeWorkload || graphType == GraphTypeService { + // workload graph nodes are type workload or service + if !workloadOk && !serviceOk { + panic(fmt.Sprintf("Failed ID gen: namespace=[%s] workload=[%s] app=[%s] version=[%s] service=[%s] graphType=[%s]", namespace, workload, app, version, service, graphType)) + } + if !workloadOk { + return fmt.Sprintf("svc_%v_%v", namespace, service), NodeTypeService + } + return fmt.Sprintf("wl_%v_%v", namespace, workload), NodeTypeWorkload + } + + // handle app and versionedApp graphs + versionOk := IsOK(version) + if appOk { + // For a versionedApp graph use workload as the Id, if available. 
It allows us some protection + // against labeling anti-patterns. It won't be there in a few cases like: + // - root node of a node graph + // - app box node + // Otherwise use what we have and alter node type as necessary + // For a [versionless] App graph use the app label to aggregate versions/workloads into one node + if graphType == GraphTypeVersionedApp { + if workloadOk { + return fmt.Sprintf("vapp_%v_%v", namespace, workload), NodeTypeApp + } + if versionOk { + return fmt.Sprintf("vapp_%v_%v_%v", namespace, app, version), NodeTypeApp + } + } + return fmt.Sprintf("app_%v_%v", namespace, app), NodeTypeApp + } + + // fall back to workload if applicable + if workloadOk { + return fmt.Sprintf("wl_%v_%v", namespace, workload), NodeTypeWorkload + } + + // fall back to service as a last resort in the app graph + return fmt.Sprintf("svc_%v_%v", namespace, service), NodeTypeService +} diff --git a/vendor/github.com/kiali/kiali/graph/options/options.go b/vendor/github.com/kiali/kiali/graph/options/options.go new file mode 100644 index 000000000..05f7047f6 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/options/options.go @@ -0,0 +1,355 @@ +// Package options holds the option settings for a single graph generation. +package options + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/gorilla/mux" + + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/graph" + "github.com/kiali/kiali/graph/appender" +) + +const ( + GroupByApp string = "app" + GroupByNone string = "none" + GroupByVersion string = "version" + NamespaceIstio string = "istio-system" + VendorCytoscape string = "cytoscape" + defaultDuration string = "10m" + defaultGraphType string = graph.GraphTypeWorkload + defaultGroupBy string = GroupByNone + defaultIncludeIstio bool = false + defaultInjectServiceNodes bool = false + defaultVendor string = VendorCytoscape +) + +const ( + graphKindNamespace string = "namespace" + graphKindNode string = "node" +) + +// NodeOptions are those that apply only to node-detail graphs +type NodeOptions struct { + App string + Namespace string + Service string + Version string + Workload string +} + +// VendorOptions are those that are supplied to the vendor-specific generators. +type VendorOptions struct { + Duration time.Duration + GraphType string + GroupBy string + QueryTime int64 // unix time in seconds +} + +// Options are all supported graph generation options. +type Options struct { + AccessibleNamespaces map[string]time.Time + Appenders []appender.Appender + IncludeIstio bool // include istio-system services. Ignored for istio-system ns. Default false. + InjectServiceNodes bool // inject destination service nodes between source and destination nodes. 
+ Namespaces map[string]graph.NamespaceInfo + Vendor string + NodeOptions + VendorOptions +} + +func NewOptions(r *http.Request) Options { + // path variables (0 or more will be set) + vars := mux.Vars(r) + app := vars["app"] + namespace := vars["namespace"] + service := vars["service"] + version := vars["version"] + workload := vars["workload"] + + // query params + params := r.URL.Query() + var duration time.Duration + var includeIstio bool + var injectServiceNodes bool + var queryTime int64 + durationString := params.Get("duration") + graphType := params.Get("graphType") + groupBy := params.Get("groupBy") + includeIstioString := params.Get("includeIstio") + injectServiceNodesString := params.Get("injectServiceNodes") + namespaces := params.Get("namespaces") // csl of namespaces + queryTimeString := params.Get("queryTime") + vendor := params.Get("vendor") + + if durationString == "" { + duration, _ = time.ParseDuration(defaultDuration) + } else { + var durationErr error + duration, durationErr = time.ParseDuration(durationString) + if durationErr != nil { + graph.BadRequest(fmt.Sprintf("Invalid duration [%s]", durationString)) + } + } + if graphType == "" { + graphType = defaultGraphType + } else if graphType != graph.GraphTypeApp && graphType != graph.GraphTypeService && graphType != graph.GraphTypeVersionedApp && graphType != graph.GraphTypeWorkload { + graph.BadRequest(fmt.Sprintf("Invalid graphType [%s]", graphType)) + } + // app node graphs require an app graph type + if app != "" && graphType != graph.GraphTypeApp && graphType != graph.GraphTypeVersionedApp { + graph.BadRequest(fmt.Sprintf("Invalid graphType [%s]. This node detail graph supports only graphType app or versionedApp.", graphType)) + } + if groupBy == "" { + groupBy = defaultGroupBy + } else if groupBy != GroupByApp && groupBy != GroupByNone && groupBy != GroupByVersion { + graph.BadRequest(fmt.Sprintf("Invalid groupBy [%s]", groupBy)) + } + if includeIstioString == "" { + includeIstio = defaultIncludeIstio + } else { + var includeIstioErr error + includeIstio, includeIstioErr = strconv.ParseBool(includeIstioString) + if includeIstioErr != nil { + graph.BadRequest(fmt.Sprintf("Invalid includeIstio [%s]", includeIstioString)) + } + } + if injectServiceNodesString == "" { + injectServiceNodes = defaultInjectServiceNodes + } else { + var injectServiceNodesErr error + injectServiceNodes, injectServiceNodesErr = strconv.ParseBool(injectServiceNodesString) + if injectServiceNodesErr != nil { + graph.BadRequest(fmt.Sprintf("Invalid injectServiceNodes [%s]", injectServiceNodesString)) + } + } + if queryTimeString == "" { + queryTime = time.Now().Unix() + } else { + var queryTimeErr error + queryTime, queryTimeErr = strconv.ParseInt(queryTimeString, 10, 64) + if queryTimeErr != nil { + graph.BadRequest(fmt.Sprintf("Invalid queryTime [%s]", queryTimeString)) + } + } + if vendor == "" { + vendor = defaultVendor + } else if vendor != VendorCytoscape { + graph.BadRequest(fmt.Sprintf("Invalid vendor [%s]", vendor)) + } + + // Process namespaces options: + namespaceMap := make(map[string]graph.NamespaceInfo) + accessibleNamespaces := getAccessibleNamespaces() + + // If path variable is set then it is the only relevant namespace (it's a node graph) + // Else if namespaces query param is set it specifies the relevant namespaces + // Else error, at least one namespace is required. 
+ if namespace != "" { + namespaces = namespace + } + + if namespaces == "" { + graph.BadRequest(fmt.Sprintf("At least one namespace must be specified via the namespaces query parameter.")) + } + + for _, namespaceToken := range strings.Split(namespaces, ",") { + namespaceToken = strings.TrimSpace(namespaceToken) + if creationTime, found := accessibleNamespaces[namespaceToken]; found { + namespaceMap[namespaceToken] = graph.NamespaceInfo{ + Name: namespaceToken, + Duration: resolveNamespaceDuration(creationTime, duration, queryTime), + } + } else { + graph.Forbidden(fmt.Sprintf("Requested namespace [%s] is not accessible.", namespaceToken)) + } + } + + // Service graphs require service injection + if graphType == graph.GraphTypeService { + injectServiceNodes = true + } + + options := Options{ + AccessibleNamespaces: accessibleNamespaces, + IncludeIstio: includeIstio, + InjectServiceNodes: injectServiceNodes, + Namespaces: namespaceMap, + Vendor: vendor, + NodeOptions: NodeOptions{ + App: app, + Namespace: namespace, + Service: service, + Version: version, + Workload: workload, + }, + VendorOptions: VendorOptions{ + Duration: duration, + GraphType: graphType, + GroupBy: groupBy, + QueryTime: queryTime, + }, + } + + appenders := parseAppenders(params, options) + options.Appenders = appenders + + return options +} + +// GetGraphKind will return the kind of graph represented by the options. +func (o *Options) GetGraphKind() string { + if o.NodeOptions.App != "" || + o.NodeOptions.Version != "" || + o.NodeOptions.Workload != "" || + o.NodeOptions.Service != "" { + return graphKindNode + } else { + return graphKindNamespace + } +} + +func parseAppenders(params url.Values, o Options) []appender.Appender { + requestedAppenders := make(map[string]bool) + allAppenders := false + if _, ok := params["appenders"]; ok { + for _, requestedAppender := range strings.Split(params.Get("appenders"), ",") { + switch strings.TrimSpace(requestedAppender) { + case appender.DeadNodeAppenderName: + requestedAppenders[appender.DeadNodeAppenderName] = true + case appender.ServiceEntryAppenderName: + requestedAppenders[appender.ServiceEntryAppenderName] = true + case appender.IstioAppenderName: + requestedAppenders[appender.IstioAppenderName] = true + case appender.ResponseTimeAppenderName: + requestedAppenders[appender.ResponseTimeAppenderName] = true + case appender.SecurityPolicyAppenderName: + requestedAppenders[appender.SecurityPolicyAppenderName] = true + case appender.SidecarsCheckAppenderName: + requestedAppenders[appender.SidecarsCheckAppenderName] = true + case appender.UnusedNodeAppenderName: + requestedAppenders[appender.UnusedNodeAppenderName] = true + case "": + // skip + default: + graph.BadRequest(fmt.Sprintf("Invalid appender [%s]", strings.TrimSpace(requestedAppender))) + } + } + } else { + allAppenders = true + } + + // The appender order is important + // To pre-process service nodes run service_entry appender first + // To reduce processing, filter dead nodes next + // To reduce processing, next run appenders that don't apply to unused services + // Add orphan (unused) services + // Run remaining appenders + var appenders []appender.Appender + + if _, ok := requestedAppenders[appender.ServiceEntryAppenderName]; ok || allAppenders { + a := appender.ServiceEntryAppender{ + AccessibleNamespaces: o.AccessibleNamespaces, + } + appenders = append(appenders, a) + } + if _, ok := requestedAppenders[appender.DeadNodeAppenderName]; ok || allAppenders { + a := appender.DeadNodeAppender{} + appenders = 
append(appenders, a) + } + if _, ok := requestedAppenders[appender.ResponseTimeAppenderName]; ok || allAppenders { + quantile := appender.DefaultQuantile + if _, ok := params["responseTimeQuantile"]; ok { + if responseTimeQuantile, err := strconv.ParseFloat(params.Get("responseTimeQuantile"), 64); err == nil { + quantile = responseTimeQuantile + } + } + a := appender.ResponseTimeAppender{ + Quantile: quantile, + GraphType: o.GraphType, + InjectServiceNodes: o.InjectServiceNodes, + IncludeIstio: o.IncludeIstio, + Namespaces: o.Namespaces, + QueryTime: o.QueryTime, + } + appenders = append(appenders, a) + } + if _, ok := requestedAppenders[appender.SecurityPolicyAppenderName]; ok || allAppenders { + a := appender.SecurityPolicyAppender{ + GraphType: o.GraphType, + IncludeIstio: o.IncludeIstio, + InjectServiceNodes: o.InjectServiceNodes, + Namespaces: o.Namespaces, + QueryTime: o.QueryTime, + } + appenders = append(appenders, a) + } + if _, ok := requestedAppenders[appender.UnusedNodeAppenderName]; ok || allAppenders { + hasNodeOptions := o.App != "" || o.Workload != "" || o.Service != "" + a := appender.UnusedNodeAppender{ + GraphType: o.GraphType, + IsNodeGraph: hasNodeOptions, + } + appenders = append(appenders, a) + } + if _, ok := requestedAppenders[appender.IstioAppenderName]; ok || allAppenders { + a := appender.IstioAppender{} + appenders = append(appenders, a) + } + if _, ok := requestedAppenders[appender.SidecarsCheckAppenderName]; ok || allAppenders { + a := appender.SidecarsCheckAppender{} + appenders = append(appenders, a) + } + + return appenders +} + +// getAccessibleNamespaces returns a Set of all namespaces accessible to the user. +// The Set is implemented using the map convention. Each map entry is set to the +// creation timestamp of the namespace, to be used to ensure valid time ranges for +// queries against the namespace. +func getAccessibleNamespaces() map[string]time.Time { + // Get the namespaces + business, err := business.Get() + graph.CheckError(err) + + namespaces, err := business.Namespace.GetNamespaces() + graph.CheckError(err) + + // Create a map to store the namespaces + namespaceMap := make(map[string]time.Time) + for _, namespace := range namespaces { + namespaceMap[namespace.Name] = namespace.CreationTimestamp + } + + return namespaceMap +} + +// resolveNamespaceDuration determines if, given queryTime, the requestedRange won't lead to +// querying data before nsCreationTime. If this is the case, resolveNamespaceDuration returns +// and adjusted range. Else, the original requestedRange is returned. 
+func resolveNamespaceDuration(nsCreationTime time.Time, requestedRange time.Duration, queryTime int64) time.Duration { + var referenceTime time.Time + resolvedBound := requestedRange + + if !nsCreationTime.IsZero() { + if queryTime != 0 { + referenceTime = time.Unix(queryTime, 0) + } else { + referenceTime = time.Now() + } + + nsLifetime := referenceTime.Sub(nsCreationTime) + if nsLifetime < resolvedBound { + resolvedBound = nsLifetime + } + } + + return resolvedBound +} diff --git a/vendor/github.com/kiali/kiali/graph/protocol.go b/vendor/github.com/kiali/kiali/graph/protocol.go new file mode 100644 index 000000000..32aca55f0 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/protocol.go @@ -0,0 +1,203 @@ +package graph + +import ( + "fmt" + "strings" + + "github.com/kiali/kiali/log" +) + +type Rate struct { + Name string + IsErr bool + IsIn bool + IsOut bool + IsPercentErr bool + IsPercentReq bool + IsTotal bool + Precision int +} + +type Protocol struct { + Name string + EdgeRates []Rate + NodeRates []Rate + Unit string + UnitShort string +} + +var GRPC Protocol = Protocol{ + Name: "grpc", + EdgeRates: []Rate{ + Rate{Name: "grpc", IsTotal: true, Precision: 2}, + Rate{Name: "grpcErr", IsErr: true, Precision: 2}, + Rate{Name: "grpcPercentErr", IsPercentErr: true, Precision: 1}, + Rate{Name: "grpcPercentReq", IsPercentReq: true, Precision: 1}, + }, + NodeRates: []Rate{ + Rate{Name: "grpcIn", IsIn: true, Precision: 2}, + Rate{Name: "grpcInErr", IsErr: true, Precision: 2}, + Rate{Name: "grpcOut", IsOut: true, Precision: 2}, + }, + Unit: "requests per second", + UnitShort: "rps", +} + +var HTTP Protocol = Protocol{ + Name: "http", + EdgeRates: []Rate{ + Rate{Name: "http", IsTotal: true, Precision: 2}, + Rate{Name: "http3xx", Precision: 2}, + Rate{Name: "http4xx", IsErr: true, Precision: 2}, + Rate{Name: "http5xx", IsErr: true, Precision: 2}, + Rate{Name: "httpPercentErr", IsPercentErr: true, Precision: 1}, + Rate{Name: "httpPercentReq", IsPercentReq: true, Precision: 1}, + }, + NodeRates: []Rate{ + Rate{Name: "httpIn", IsIn: true, Precision: 2}, + Rate{Name: "httpIn3xx", Precision: 2}, + Rate{Name: "httpIn4xx", IsErr: true, Precision: 2}, + Rate{Name: "httpIn5xx", IsErr: true, Precision: 2}, + Rate{Name: "httpOut", IsOut: true, Precision: 2}, + }, + Unit: "requests per second", + UnitShort: "rps", +} +var TCP Protocol = Protocol{ + Name: "tcp", + EdgeRates: []Rate{ + Rate{Name: "tcp", IsTotal: true, Precision: 2}, + }, + NodeRates: []Rate{ + Rate{Name: "tcpIn", IsIn: true, Precision: 2}, + Rate{Name: "tcpOut", IsOut: true, Precision: 2}, + }, + Unit: "bytes per second", + UnitShort: "bps", +} + +var Protocols []Protocol = []Protocol{GRPC, HTTP, TCP} + +func AddToMetadata(protocol string, val float64, code string, sourceMetadata, destMetadata, edgeMetadata map[string]interface{}) { + switch protocol { + case "grpc": + addToMetadataGrpc(val, code, sourceMetadata, destMetadata, edgeMetadata) + case "http": + addToMetadataHttp(val, code, sourceMetadata, destMetadata, edgeMetadata) + case "tcp": + addToMetadataTcp(val, code, sourceMetadata, destMetadata, edgeMetadata) + default: + log.Tracef("Ignore unhandled metadata protocol [%s]", protocol) + } +} + +func addToMetadataGrpc(val float64, code string, sourceMetadata, destMetadata, edgeMetadata map[string]interface{}) { + addToMetadataValue(sourceMetadata, "grpcOut", val) + addToMetadataValue(destMetadata, "grpcIn", val) + addToMetadataValue(edgeMetadata, "grpc", val) + + // Istio telemetry may use HTTP codes for gRPC, so if it quacks like a 
duck... + isHttpCode := len(code) == 3 + isErr := false + if isHttpCode { + isErr = strings.HasPrefix(code, "4") || strings.HasPrefix(code, "5") + } else { + isErr = code != "0" + } + if isErr { + addToMetadataValue(destMetadata, "grpcInErr", val) + addToMetadataValue(edgeMetadata, "grpcErr", val) + } +} + +func addToMetadataHttp(val float64, code string, sourceMetadata, destMetadata, edgeMetadata map[string]interface{}) { + addToMetadataValue(sourceMetadata, "httpOut", val) + addToMetadataValue(destMetadata, "httpIn", val) + addToMetadataValue(edgeMetadata, "http", val) + + // note, we don't track 2xx because it's not used downstream and can be easily + // calculated: 2xx = (rate - 3xx - 4xx - 5xx) + switch { + case strings.HasPrefix(code, "3"): + addToMetadataValue(destMetadata, "httpIn3xx", val) + addToMetadataValue(edgeMetadata, "http3xx", val) + case strings.HasPrefix(code, "4"): + addToMetadataValue(destMetadata, "httpIn4xx", val) + addToMetadataValue(edgeMetadata, "http4xx", val) + case strings.HasPrefix(code, "5"): + addToMetadataValue(destMetadata, "httpIn5xx", val) + addToMetadataValue(edgeMetadata, "http5xx", val) + } +} + +func addToMetadataTcp(val float64, code string, sourceMetadata, destMetadata, edgeMetadata map[string]interface{}) { + addToMetadataValue(sourceMetadata, "tcpOut", val) + addToMetadataValue(destMetadata, "tcpIn", val) + addToMetadataValue(edgeMetadata, "tcp", val) +} + +func AddOutgoingEdgeToMetadata(sourceMetadata, edgeMetadata map[string]interface{}) { + if val, valOk := edgeMetadata["grpc"]; valOk { + addToMetadataValue(sourceMetadata, "grpcOut", val.(float64)) + } + if val, valOk := edgeMetadata["http"]; valOk { + addToMetadataValue(sourceMetadata, "httpOut", val.(float64)) + } + if val, valOk := edgeMetadata["tcp"]; valOk { + addToMetadataValue(sourceMetadata, "tcpOut", val.(float64)) + } +} + +func AddServiceGraphTraffic(toEdge, fromEdge *Edge) { + protocol := toEdge.Metadata["protocol"] + switch protocol { + case "grpc": + addToMetadataValue(toEdge.Metadata, "grpc", fromEdge.Metadata["grpc"].(float64)) + if val, ok := fromEdge.Metadata["grpcErr"]; ok { + addToMetadataValue(toEdge.Metadata, "grpcErr", val.(float64)) + } + case "http": + addToMetadataValue(toEdge.Metadata, "http", fromEdge.Metadata["http"].(float64)) + if val, ok := fromEdge.Metadata["http3xx"]; ok { + addToMetadataValue(toEdge.Metadata, "http3xx", val.(float64)) + } + if val, ok := fromEdge.Metadata["http4xx"]; ok { + addToMetadataValue(toEdge.Metadata, "http4xx", val.(float64)) + } + if val, ok := fromEdge.Metadata["http5xx"]; ok { + addToMetadataValue(toEdge.Metadata, "http5xx", val.(float64)) + } + case "tcp": + addToMetadataValue(toEdge.Metadata, "tcp", fromEdge.Metadata["tcp"].(float64)) + default: + Error(fmt.Sprintf("Unexpected edge protocol [%v] for edge [%+v]", protocol, toEdge)) + } + + // handle any appender-based edge data (nothing currently) + // note: We used to average response times of the aggregated edges but realized that + // we can't average quantiles (kiali-2297). 
+} + +func addToMetadataValue(md map[string]interface{}, k string, v float64) { + if curr, ok := md[k]; ok { + md[k] = curr.(float64) + v + } else { + md[k] = v + } +} + +func averageMetadataValue(md map[string]interface{}, k string, v float64) { + total := v + count := 1.0 + kTotal := k + "_total" + kCount := k + "_count" + if prevTotal, ok := md[kTotal]; ok { + total += prevTotal.(float64) + } + if prevCount, ok := md[kCount]; ok { + count += prevCount.(float64) + } + md[kTotal] = total + md[kCount] = count + md[k] = total / count +} diff --git a/vendor/github.com/kiali/kiali/graph/util.go b/vendor/github.com/kiali/kiali/graph/util.go new file mode 100644 index 000000000..d19bb5a23 --- /dev/null +++ b/vendor/github.com/kiali/kiali/graph/util.go @@ -0,0 +1,45 @@ +package graph + +import ( + "net/http" +) + +type Response struct { + Message string + Code int +} + +// Error panics with InternalServerError and the provided message +func Error(message string) { + Panic(message, http.StatusInternalServerError) +} + +// BadRequest panics with BadRequest and the provided message +func BadRequest(message string) { + Panic(message, http.StatusBadRequest) +} + +// Forbidden panics with Forbidden and the provided message +func Forbidden(message string) { + Panic(message, http.StatusForbidden) +} + +// Panic panics with the provided HTTP response code and message +func Panic(message string, code int) Response { + panic(Response{ + Message: message, + Code: code, + }) +} + +// CheckError panics with the supplied error if it is non-nil +func CheckError(err error) { + if err != nil { + panic(err.Error) + } +} + +// IsOK just validates that a telemetry label value is not empty or unknown +func IsOK(telemetryVal string) bool { + return telemetryVal != "" && telemetryVal != Unknown +} diff --git a/vendor/github.com/kiali/kiali/handlers/apps.go b/vendor/github.com/kiali/kiali/handlers/apps.go new file mode 100644 index 000000000..7216e20c0 --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/apps.go @@ -0,0 +1,158 @@ +package handlers + +import ( + "net/http" + + "github.com/gorilla/mux" + "k8s.io/apimachinery/pkg/api/errors" + + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/prometheus" +) + +// AppList is the API handler to fetch all the apps to be displayed, related to a single namespace +func AppList(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Apps initialization error: "+err.Error()) + return + } + namespace := params["namespace"] + + // Fetch and build apps + appList, err := business.App.GetAppList(namespace) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + + RespondWithJSON(w, http.StatusOK, appList) +} + +// AppDetails is the API handler to fetch all details to be displayed, related to a single app +func AppDetails(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + namespace := params["namespace"] + app := params["app"] + + // Fetch and build app + appDetails, err := business.App.GetApp(namespace, app) + if err != nil { + if errors.IsNotFound(err) { + RespondWithError(w, http.StatusNotFound, err.Error()) + } 
else { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + } + return + } + + RespondWithJSON(w, http.StatusOK, appDetails) +} + +// AppMetrics is the API handler to fetch metrics to be displayed, related to an app-label grouping +func AppMetrics(w http.ResponseWriter, r *http.Request) { + getAppMetrics(w, r, defaultPromClientSupplier, defaultK8SClientSupplier) +} + +// getAppMetrics (mock-friendly version) +func getAppMetrics(w http.ResponseWriter, r *http.Request, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { + vars := mux.Vars(r) + namespace := vars["namespace"] + app := vars["app"] + + prom, _, namespaceInfo := initClientsForMetrics(w, promSupplier, k8sSupplier, namespace) + if prom == nil { + // any returned value nil means error & response already written + return + } + + params := prometheus.IstioMetricsQuery{Namespace: namespace, App: app} + err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo) + if err != nil { + RespondWithError(w, http.StatusBadRequest, err.Error()) + return + } + + metrics := prom.GetMetrics(¶ms) + RespondWithJSON(w, http.StatusOK, metrics) +} + +// CustomDashboard is the API handler to fetch runtime metrics to be displayed, related to a single app +func CustomDashboard(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + namespace := vars["namespace"] + app := vars["app"] + template := vars["template"] + + prom, _, namespaceInfo := initClientsForMetrics(w, defaultPromClientSupplier, defaultK8SClientSupplier, namespace) + if prom == nil { + // any returned value nil means error & response already written + return + } + + monitoringClient, err := kubernetes.NewKialiMonitoringClient() + if err != nil { + log.Error(err) + RespondWithError(w, http.StatusServiceUnavailable, "Kiali monitoring client error: "+err.Error()) + return + } + + svc := business.NewDashboardsService(monitoringClient, prom) + + params := prometheus.CustomMetricsQuery{Namespace: namespace, App: app} + err = extractCustomMetricsQueryParams(r, ¶ms, namespaceInfo) + if err != nil { + RespondWithError(w, http.StatusBadRequest, err.Error()) + return + } + + dashboard, err := svc.GetDashboard(params, template) + if err != nil { + if errors.IsNotFound(err) { + RespondWithError(w, http.StatusNotFound, err.Error()) + } else { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + } + return + } + RespondWithJSON(w, http.StatusOK, dashboard) +} + +// AppDashboard is the API handler to fetch Istio dashboard, related to a single app +func AppDashboard(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + namespace := vars["namespace"] + app := vars["app"] + + prom, _, namespaceInfo := initClientsForMetrics(w, defaultPromClientSupplier, defaultK8SClientSupplier, namespace) + if prom == nil { + // any returned value nil means error & response already written + return + } + + params := prometheus.IstioMetricsQuery{Namespace: namespace, App: app} + err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo) + if err != nil { + RespondWithError(w, http.StatusBadRequest, err.Error()) + return + } + + svc := business.NewDashboardsService(nil, prom) + dashboard, err := svc.GetIstioDashboard(params) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + RespondWithJSON(w, http.StatusOK, dashboard) +} diff --git a/vendor/github.com/kiali/kiali/handlers/base.go b/vendor/github.com/kiali/kiali/handlers/base.go new file mode 100644 index 000000000..99014fad7 --- /dev/null +++ 
b/vendor/github.com/kiali/kiali/handlers/base.go @@ -0,0 +1,38 @@ +package handlers + +import ( + "encoding/json" + "net/http" +) + +func RespondWithJSON(w http.ResponseWriter, code int, payload interface{}) { + response, err := json.Marshal(payload) + if err != nil { + response, _ = json.Marshal(map[string]string{"error": err.Error()}) + code = http.StatusInternalServerError + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + w.Write(response) +} + +func RespondWithJSONIndent(w http.ResponseWriter, code int, payload interface{}) { + response, err := json.MarshalIndent(payload, "", " ") + if err != nil { + response, _ = json.Marshal(map[string]string{"error": err.Error()}) + code = http.StatusInternalServerError + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + w.Write(response) +} + +func RespondWithError(w http.ResponseWriter, code int, message string) { + RespondWithJSON(w, code, map[string]string{"error": message}) +} + +func RespondWithCode(w http.ResponseWriter, code int) { + w.WriteHeader(code) +} diff --git a/vendor/github.com/kiali/kiali/handlers/config.go b/vendor/github.com/kiali/kiali/handlers/config.go new file mode 100644 index 000000000..0b7510f8a --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/config.go @@ -0,0 +1,102 @@ +package handlers + +import ( + "fmt" + "net/http" + "time" + + yaml "gopkg.in/yaml.v2" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/prometheus" +) + +const ( + defaultPrometheusGlobalScrapeInterval = 15 // seconds +) + +// PrometheusConfig holds actual Prometheus configuration that is useful to Kiali. +// All durations are in seconds. +type PrometheusConfig struct { + GlobalScrapeInterval int64 `json:"globalScrapeInterval,omitempty"` + StorageTsdbRetention int64 `json:"storageTsdbRetention,omitempty"` +} + +// PublicConfig is a subset of Kiali configuration that can be exposed to clients to +// help them interact with the system. +type PublicConfig struct { + IstioNamespace string `json:"istioNamespace,omitempty"` + IstioLabels config.IstioLabels `json:"istioLabels,omitempty"` + Prometheus PrometheusConfig `json:"prometheus,omitempty"` +} + +// Config is a REST http.HandlerFunc serving up the Kiali configuration made public to clients. +func Config(w http.ResponseWriter, r *http.Request) { + defer handlePanic(w) + + // Note that determine the Prometheus config at request time because it is not + // guaranteed to remain the same during the Kiali lifespan. 
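+	// An illustrative response payload (example values only): {"istioNamespace":"istio-system","prometheus":{"globalScrapeInterval":15,"storageTsdbRetention":604800}}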
+ promConfig := getPrometheusConfig() + config := config.Get() + publicConfig := PublicConfig{ + IstioNamespace: config.IstioNamespace, + IstioLabels: config.IstioLabels, + Prometheus: PrometheusConfig{ + GlobalScrapeInterval: promConfig.GlobalScrapeInterval, + StorageTsdbRetention: promConfig.StorageTsdbRetention, + }, + } + + RespondWithJSONIndent(w, http.StatusOK, publicConfig) +} + +type PrometheusPartialConfig struct { + Global struct { + Scrape_interval string + } +} + +func getPrometheusConfig() PrometheusConfig { + promConfig := PrometheusConfig{ + GlobalScrapeInterval: defaultPrometheusGlobalScrapeInterval, + } + + client, err := prometheus.NewClient() + if !checkErr(err, "") { + log.Error(err) + return promConfig + } + + configResult, err := client.GetConfiguration() + if checkErr(err, "Failed to fetch Prometheus configuration") { + var config PrometheusPartialConfig + if checkErr(yaml.Unmarshal([]byte(configResult.YAML), &config), "Failed to unmarshal Prometheus configuration") { + scrapeIntervalString := config.Global.Scrape_interval + scrapeInterval, err := time.ParseDuration(scrapeIntervalString) + if checkErr(err, fmt.Sprintf("Invalid global scrape interval [%s]", scrapeIntervalString)) { + promConfig.GlobalScrapeInterval = int64(scrapeInterval.Seconds()) + } + } + } + + flags, err := client.GetFlags() + if checkErr(err, "Failed to fetch Prometheus flags") { + if retentionString, ok := flags["storage.tsdb.retention"]; ok { + retention, err := time.ParseDuration(retentionString) + if checkErr(err, fmt.Sprintf("Invalid storage.tsdb.retention [%s]", retentionString)) { + promConfig.StorageTsdbRetention = int64(retention.Seconds()) + } + } + } + + return promConfig +} + +func checkErr(err error, message string) bool { + if err != nil { + log.Errorf("%s: %v", message, err) + return false + } + return true +} diff --git a/vendor/github.com/kiali/kiali/handlers/grafana.go b/vendor/github.com/kiali/kiali/handlers/grafana.go new file mode 100644 index 000000000..ed12c231b --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/grafana.go @@ -0,0 +1,163 @@ +package handlers + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + + "k8s.io/api/core/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" +) + +type serviceSupplier func(string, string) (*v1.ServiceSpec, error) +type dashboardSupplier func(string, string, string) ([]byte, int, error) + +// GetGrafanaInfo provides the Grafana URL and other info, first by checking if a config exists +// then (if not) by inspecting the Kubernetes Grafana service in namespace istio-system +func GetGrafanaInfo(w http.ResponseWriter, r *http.Request) { + info, code, err := getGrafanaInfo(getService, findDashboard) + if err != nil { + log.Error(err) + RespondWithError(w, code, err.Error()) + return + } + RespondWithJSON(w, code, info) +} + +// getGrafanaInfo returns the Grafana URL and other info, the HTTP status code (int) and eventually an error +func getGrafanaInfo(serviceSupplier serviceSupplier, dashboardSupplier dashboardSupplier) (*models.GrafanaInfo, int, error) { + grafanaConfig := config.Get().ExternalServices.Grafana + + if !grafanaConfig.DisplayLink { + return nil, http.StatusNoContent, nil + } + + // Check if URL is in the configuration + if grafanaConfig.URL == "" { + return nil, http.StatusServiceUnavailable, errors.New("Grafana URL is not set in Kiali configuration") + } + + // Check if URL 
is valid + _, err := validateURL(grafanaConfig.URL) + if err != nil { + return nil, http.StatusServiceUnavailable, errors.New("Wrong format for Grafana URL in Kiali configuration: " + err.Error()) + } + + // Find the in-cluster URL to reach Grafana's REST API + spec, err := serviceSupplier(grafanaConfig.ServiceNamespace, grafanaConfig.Service) + if err != nil { + if k8serr.IsNotFound(err) { + return nil, http.StatusServiceUnavailable, err + } + return nil, http.StatusInternalServerError, err + } + if len(spec.Ports) == 0 { + return nil, http.StatusServiceUnavailable, errors.New("No port found for Grafana service, cannot access in-cluster service") + } + if len(spec.Ports) > 1 { + log.Warning("Several ports found for Grafana service, picking the first one") + } + internalURL := fmt.Sprintf("http://%s.%s:%d", grafanaConfig.Service, grafanaConfig.ServiceNamespace, spec.Ports[0].Port) + + credentials, err := buildAuthHeader(grafanaConfig) + if err != nil { + log.Warning("Failed to build auth header token: " + err.Error()) + } + + // Call Grafana REST API to get dashboard urls + serviceDashboardPath, err := getDashboardPath(internalURL, grafanaConfig.ServiceDashboardPattern, credentials, dashboardSupplier) + if err != nil { + return nil, http.StatusInternalServerError, err + } + workloadDashboardPath, err := getDashboardPath(internalURL, grafanaConfig.WorkloadDashboardPattern, credentials, dashboardSupplier) + if err != nil { + return nil, http.StatusInternalServerError, err + } + + grafanaInfo := models.GrafanaInfo{ + URL: grafanaConfig.URL, + ServiceDashboardPath: serviceDashboardPath, + WorkloadDashboardPath: workloadDashboardPath, + VarNamespace: grafanaConfig.VarNamespace, + VarService: grafanaConfig.VarService, + VarWorkload: grafanaConfig.VarWorkload, + } + + return &grafanaInfo, http.StatusOK, nil +} + +func getDashboardPath(url string, searchPattern string, credentials string, dashboardSupplier dashboardSupplier) (string, error) { + body, code, err := dashboardSupplier(url, searchPattern, credentials) + if err != nil { + return "", err + } + if code != http.StatusOK { + // Get error message + var f map[string]string + err = json.Unmarshal(body, &f) + if err != nil { + return "", fmt.Errorf("Unknown error from Grafana (%d)", code) + } + message, ok := f["message"] + if !ok { + return "", fmt.Errorf("Unknown error from Grafana (%d)", code) + } + return "", fmt.Errorf("Error from Grafana (%d): %s", code, message) + } + + // Status OK, read dashboards info + var dashboards []map[string]interface{} + err = json.Unmarshal(body, &dashboards) + if err != nil { + return "", err + } + if len(dashboards) == 0 { + return "", fmt.Errorf("No Grafana dashboard found for search pattern '%s'", searchPattern) + } + if len(dashboards) > 1 { + log.Infof("Several Grafana dashboards found for pattern '%s', picking the first one", searchPattern) + } + dashPath, ok := dashboards[0]["url"] + if !ok { + return "", fmt.Errorf("URL field not found in Grafana dashboard for search pattern '%s'", searchPattern) + } + return dashPath.(string), nil +} + +func findDashboard(url, searchPattern string, credentials string) ([]byte, int, error) { + req, err := http.NewRequest(http.MethodGet, url+"/api/search?query="+searchPattern, nil) + if err != nil { + return nil, 0, err + } + if credentials != "" { + req.Header.Add("Authorization", credentials) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, 0, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + return body, 
resp.StatusCode, err +} + +func buildAuthHeader(grafanaConfig config.GrafanaConfig) (string, error) { + var credHeader string + if grafanaConfig.APIKey != "" { + credHeader = "Bearer " + grafanaConfig.APIKey + } else if grafanaConfig.Username != "" { + if grafanaConfig.Password == "" { + return "", fmt.Errorf("Grafana username set but no Grafana password provided") + } + basicAuth := base64.StdEncoding.EncodeToString([]byte(grafanaConfig.Username + ":" + grafanaConfig.Password)) + credHeader = "Basic " + basicAuth + } + return credHeader, nil +} diff --git a/vendor/github.com/kiali/kiali/handlers/graph.go b/vendor/github.com/kiali/kiali/handlers/graph.go new file mode 100644 index 000000000..2245919ca --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/graph.go @@ -0,0 +1,979 @@ +package handlers + +// Graph.go provides handlers for graph request endpoints. The handlers return configuration +// for a specified vendor (default cytoscape). The configuration format is vendor-specific, typically +// JSON, and provides what is necessary to allow the vendor's graphing tool to render the graph. +// +// The algorithm is three-pass: +// First Pass: Query Prometheus (istio-requests-total metric) to retrieve the source-destination +// dependencies. Build a traffic map to provide a full representation of nodes and edges. +// +// Second Pass: Apply any requested appenders to alter or append to the graph. +// +// Third Pass: Supply the traffic map to a vendor-specific config generator that +// constructs the vendor-specific output. +// +// The current Handlers: +// GraphNamespace: Generate a graph for all services in a namespace (whether source or destination) +// GraphNode: Generate a graph centered on a specified node, limited to requesting and requested nodes. +// +// The handlers accept the following query parameters (some handlers may ignore some parameters): +// appenders: Comma-separated list of appenders to run from [circuit_breaker, unused_service...] (default all) +// Note, appenders may support appender-specific query parameters +// duration: time.Duration indicating desired query range duration, (default 10m) +// graphType: Determines how to present the telemetry data. app | service | versionedApp | workload (default workload) +// groupBy: If supported by vendor, visually group by a specified node attribute (default version) +// includeIstio: Include istio-system (infra) services (default false) +// namespaces: Comma-separated list of namespace names to use in the graph. Will override namespace path param +// queryTime: Unix time (seconds) for query such that range is queryTime-duration..queryTime (default now) +// vendor: cytoscape (default cytoscape) +// +// * Error% is the percentage of requests with response code != 2XX +// * See the vendor-specific config generators for more details about the specific vendor. 
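+// +// Illustrative request (hypothetical URL prefix; route registration is defined elsewhere in the server): +//   GET /api/namespaces/graph?namespaces=bookinfo&graphType=versionedApp&duration=600s&appenders=circuit_breaker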
+// +import ( + "context" + "fmt" + "net/http" + "runtime/debug" + "time" + + "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/model" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/graph" + "github.com/kiali/kiali/graph/appender" + "github.com/kiali/kiali/graph/cytoscape" + "github.com/kiali/kiali/graph/options" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/prometheus" + "github.com/kiali/kiali/prometheus/internalmetrics" +) + +// GraphNamespaces is a REST http.HandlerFunc handling graph generation for 1 or more namespaces +func GraphNamespaces(w http.ResponseWriter, r *http.Request) { + defer handlePanic(w) + + client, err := prometheus.NewClient() + graph.CheckError(err) + + graphNamespaces(w, r, client) +} + +// graphNamespaces provides a testing hook that can supply a mock client +func graphNamespaces(w http.ResponseWriter, r *http.Request, client *prometheus.Client) { + o := options.NewOptions(r) + + // time how long it takes to generate this graph + promtimer := internalmetrics.GetGraphGenerationTimePrometheusTimer(o.GetGraphKind(), o.GraphType, o.InjectServiceNodes) + defer promtimer.ObserveDuration() + + trafficMap := buildNamespacesTrafficMap(o, client) + generateGraph(trafficMap, w, o) + + // update metrics + internalmetrics.SetGraphNodes(o.GetGraphKind(), o.GraphType, o.InjectServiceNodes, len(trafficMap)) +} + +func buildNamespacesTrafficMap(o options.Options, client *prometheus.Client) graph.TrafficMap { + switch o.Vendor { + case "cytoscape": + default: + graph.Error(fmt.Sprintf("Vendor [%s] not supported", o.Vendor)) + } + + log.Debugf("Build [%s] graph for [%v] namespaces [%s]", o.GraphType, len(o.Namespaces), o.Namespaces) + + trafficMap := graph.NewTrafficMap() + + globalInfo := appender.NewGlobalInfo() + for _, namespace := range o.Namespaces { + log.Debugf("Build traffic map for namespace [%s]", namespace) + namespaceTrafficMap := buildNamespaceTrafficMap(namespace.Name, o, client) + namespaceInfo := appender.NewNamespaceInfo(namespace.Name) + for _, a := range o.Appenders { + appenderTimer := internalmetrics.GetGraphAppenderTimePrometheusTimer(a.Name()) + a.AppendGraph(namespaceTrafficMap, globalInfo, namespaceInfo) + appenderTimer.ObserveDuration() + } + mergeTrafficMaps(trafficMap, namespace.Name, namespaceTrafficMap) + } + + // The appenders can add/remove/alter nodes. After the manipulations are complete + // we can make some final adjustments: + // - mark the outsiders (i.e. nodes not in the requested namespaces) + // - mark the insider traffic generators (i.e. inside the namespace and only outgoing edges) + markOutsideOrInaccessible(trafficMap, o) + markTrafficGenerators(trafficMap) + + if graph.GraphTypeService == o.GraphType { + trafficMap = reduceToServiceGraph(trafficMap) + } + + return trafficMap +} + +// mergeTrafficMaps ensures that we only have unique nodes by removing duplicate +// nodes and merging their edges. When removing a duplicate prefer an instance +// from the namespace being merged-in because it is guaranteed to have all appender +// information applied (i.e. not an outsider). 
We also need to avoid duplicate edges, +// it can happen when a terminal node of one namespace is a root node of another: +// ns1 graph: unknown -> ns1:A -> ns2:B +// ns2 graph: ns1:A -> ns2:B -> ns2:C +func mergeTrafficMaps(trafficMap graph.TrafficMap, ns string, nsTrafficMap graph.TrafficMap) { + for nsId, nsNode := range nsTrafficMap { + if node, isDup := trafficMap[nsId]; isDup { + if nsNode.Namespace == ns { + // prefer nsNode (see above comment), so do a swap + trafficMap[nsId] = nsNode + temp := node + node = nsNode + nsNode = temp + } + for _, nsEdge := range nsNode.Edges { + isDupEdge := false + for _, e := range node.Edges { + if nsEdge.Dest.ID == e.Dest.ID && nsEdge.Metadata["protocol"] == e.Metadata["protocol"] { + isDupEdge = true + break + } + } + if !isDupEdge { + node.Edges = append(node.Edges, nsEdge) + // add traffic for the new edge + graph.AddOutgoingEdgeToMetadata(node.Metadata, nsEdge.Metadata) + } + } + } else { + trafficMap[nsId] = nsNode + } + } +} + +func markOutsideOrInaccessible(trafficMap graph.TrafficMap, o options.Options) { + for _, n := range trafficMap { + switch n.NodeType { + case graph.NodeTypeUnknown: + n.Metadata["isInaccessible"] = true + case graph.NodeTypeService: + if _, ok := n.Metadata["isServiceEntry"]; ok { + n.Metadata["isInaccessible"] = true + } else { + if isOutside(n, o.Namespaces) { + n.Metadata["isOutside"] = true + } + } + default: + if isOutside(n, o.Namespaces) { + n.Metadata["isOutside"] = true + } + } + if isOutsider, ok := n.Metadata["isOutside"]; ok && isOutsider.(bool) { + if _, ok2 := n.Metadata["isInaccessible"]; !ok2 { + if isInaccessible(n, o.AccessibleNamespaces) { + n.Metadata["isInaccessible"] = true + } + } + } + } +} + +func isOutside(n *graph.Node, namespaces map[string]graph.NamespaceInfo) bool { + if n.Namespace == graph.Unknown { + return false + } + for _, ns := range namespaces { + if n.Namespace == ns.Name { + return false + } + } + return true +} + +func isInaccessible(n *graph.Node, accessibleNamespaces map[string]time.Time) bool { + if _, found := accessibleNamespaces[n.Namespace]; !found { + return true + } else { + return false + } +} + +func markTrafficGenerators(trafficMap graph.TrafficMap) { + destMap := make(map[string]*graph.Node) + for _, n := range trafficMap { + for _, e := range n.Edges { + destMap[e.Dest.ID] = e.Dest + } + } + for _, n := range trafficMap { + if len(n.Edges) == 0 { + continue + } + if _, isDest := destMap[n.ID]; !isDest { + n.Metadata["isRoot"] = true + } + } +} + +// reduceToServiceGraph compresses a [service-injected workload] graph by removing +// the workload nodes such that, with the exception of non-service root nodes, the resulting +// graph has edges only from and to service nodes. +func reduceToServiceGraph(trafficMap graph.TrafficMap) graph.TrafficMap { + reducedTrafficMap := graph.NewTrafficMap() + + for id, n := range trafficMap { + if n.NodeType != graph.NodeTypeService { + // if node isRoot then keep it to better understand traffic flow. + if val, ok := n.Metadata["isRoot"]; ok && val.(bool) { + // Remove any edge to a non-service node. The service graph only shows non-service root + // nodes, all other nodes are service nodes. The use case is direct workload-to-workload + // traffic, which is unusual but possible. This can lead to nodes with outgoing traffic + // not represented by an outgoing edge, but that is the nature of the graph type.
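+			// For example, a root workload that calls another workload directly (no intermediate service) keeps only its service-destined edges here; the direct workload-to-workload edge is dropped from the service graph.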
+ serviceEdges := []*graph.Edge{} + for _, e := range n.Edges { + if e.Dest.NodeType == graph.NodeTypeService { + serviceEdges = append(serviceEdges, e) + } else { + log.Debugf("Service graph ignoring non-service root destination [%s]", e.Dest.Workload) + } + } + n.Edges = serviceEdges + reducedTrafficMap[id] = n + } + continue + } + + // handle service node, add to reduced traffic map and generate new edges + reducedTrafficMap[id] = n + workloadEdges := n.Edges + n.Edges = []*graph.Edge{} + for _, workloadEdge := range workloadEdges { + workload := workloadEdge.Dest + checkNodeType(graph.NodeTypeWorkload, workload) + for _, serviceEdge := range workload.Edges { + // As above, ignore edges to non-service destinations + if serviceEdge.Dest.NodeType != graph.NodeTypeService { + log.Debugf("Service graph ignoring non-service destination [%s]", serviceEdge.Dest.Workload) + continue + } + childService := serviceEdge.Dest + var edge *graph.Edge + for _, e := range n.Edges { + if childService.ID == e.Dest.ID && serviceEdge.Metadata["protocol"] == e.Metadata["protocol"] { + edge = e + break + } + } + if nil == edge { + n.Edges = append(n.Edges, serviceEdge) + } else { + addServiceGraphTraffic(edge, serviceEdge) + } + } + } + } + + return reducedTrafficMap +} + +func addServiceGraphTraffic(toEdge, fromEdge *graph.Edge) { + graph.AddServiceGraphTraffic(toEdge, fromEdge) + + // handle any appender-based edge data (nothing currently) + // note: We used to average response times of the aggregated edges but realized that + // we can't average quantiles (kiali-2297). +} + +func checkNodeType(expected string, n *graph.Node) { + if expected != n.NodeType { + graph.Error(fmt.Sprintf("Expected nodeType [%s] for node [%+v]", expected, n)) + } +} + +// buildNamespaceTrafficMap returns a map of all namespace nodes (key=id). All +// nodes either directly send and/or receive requests from a node in the namespace. +func buildNamespaceTrafficMap(namespace string, o options.Options, client *prometheus.Client) graph.TrafficMap { + // create map to aggregate traffic by protocol and response code + trafficMap := graph.NewTrafficMap() + + requestsMetric := "istio_requests_total" + duration := o.Namespaces[namespace].Duration + + // query prometheus for request traffic in three queries: + // 1) query for traffic originating from "unknown" (i.e. the internet). + groupBy := "source_workload_namespace,source_workload,source_app,source_version,destination_service_namespace,destination_service_name,destination_workload,destination_app,destination_version,request_protocol,response_code" + query := fmt.Sprintf(`sum(rate(%s{reporter="destination",source_workload="unknown",destination_service_namespace="%s"} [%vs])) by (%s)`, + requestsMetric, + namespace, + int(duration.Seconds()), // range duration for the query + groupBy) + unkVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMap(trafficMap, &unkVector, o) + + // 2) query for traffic originating from a workload outside of the namespace. 
Exclude any "unknown" source telemetry (an unusual corner case) + query = fmt.Sprintf(`sum(rate(%s{reporter="source",source_workload_namespace!="%s",source_workload!="unknown",destination_service_namespace="%s"} [%vs])) by (%s)`, + requestsMetric, + namespace, + namespace, + int(duration.Seconds()), // range duration for the query + groupBy) + + // fetch the externally originating request traffic time-series + extVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMap(trafficMap, &extVector, o) + + // 3) query for traffic originating from a workload inside of the namespace + query = fmt.Sprintf(`sum(rate(%s{reporter="source",source_workload_namespace="%s"} [%vs])) by (%s)`, + requestsMetric, + namespace, + int(duration.Seconds()), // range duration for the query + groupBy) + + // fetch the internally originating request traffic time-series + intVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMap(trafficMap, &intVector, o) + + // istio component telemetry is only reported destination-side, so we must perform additional queries + if o.IncludeIstio { + istioNamespace := config.Get().IstioNamespace + + // 4) if the target namespace is istioNamespace re-query for traffic originating from outside (other than unknown, covered in query #1) + if namespace == istioNamespace { + query = fmt.Sprintf(`sum(rate(%s{reporter="destination",source_workload!="unknown",source_workload_namespace!="%s",destination_service_namespace="%s"} [%vs])) by (%s)`, + requestsMetric, + namespace, + namespace, + int(duration.Seconds()), // range duration for the query + groupBy) + + // fetch the externally originating request traffic time-series + extIstioVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMap(trafficMap, &extIstioVector, o) + } + + // 5) supplemental query for traffic originating from a workload inside of the namespace with istioSystem destination + query = fmt.Sprintf(`sum(rate(%s{reporter="destination",source_workload_namespace="%s",destination_service_namespace="%s"} [%vs])) by (%s)`, + requestsMetric, + namespace, + istioNamespace, + int(duration.Seconds()), // range duration for the query + groupBy) + + // fetch the internally originating request traffic time-series + intIstioVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMap(trafficMap, &intIstioVector, o) + } + + // Section for TCP services + tcpMetric := "istio_tcp_sent_bytes_total" + + // 1) query for traffic originating from "unknown" (i.e. the internet) + tcpGroupBy := "source_workload_namespace,source_workload,source_app,source_version,destination_workload_namespace,destination_service_name,destination_workload,destination_app,destination_version" + query = fmt.Sprintf(`sum(rate(%s{reporter="destination",source_workload="unknown",destination_workload_namespace="%s"} [%vs])) by (%s)`, + tcpMetric, + namespace, + int(duration.Seconds()), // range duration for the query + tcpGroupBy) + tcpUnkVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMapTcp(trafficMap, &tcpUnkVector, o) + + // 2) query for traffic originating from a workload outside of the namespace. 
Exclude any "unknown" source telemetry (an unusual corner case) + tcpGroupBy = "source_workload_namespace,source_workload,source_app,source_version,destination_service_namespace,destination_service_name,destination_workload,destination_app,destination_version" + query = fmt.Sprintf(`sum(rate(%s{reporter="source",source_workload_namespace!="%s",source_workload!="unknown",destination_service_namespace="%s"} [%vs])) by (%s)`, + tcpMetric, + namespace, + namespace, + int(duration.Seconds()), // range duration for the query + tcpGroupBy) + tcpExtVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMapTcp(trafficMap, &tcpExtVector, o) + + // 3) query for traffic originating from a workload inside of the namespace + query = fmt.Sprintf(`sum(rate(%s{reporter="source",source_workload_namespace="%s"} [%vs])) by (%s)`, + tcpMetric, + namespace, + int(duration.Seconds()), // range duration for the query + tcpGroupBy) + tcpInVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMapTcp(trafficMap, &tcpInVector, o) + + return trafficMap +} + +func populateTrafficMap(trafficMap graph.TrafficMap, vector *model.Vector, o options.Options) { + for _, s := range *vector { + m := s.Metric + lSourceWlNs, sourceWlNsOk := m["source_workload_namespace"] + lSourceWl, sourceWlOk := m["source_workload"] + lSourceApp, sourceAppOk := m["source_app"] + lSourceVer, sourceVerOk := m["source_version"] + lDestSvcNs, destSvcNsOk := m["destination_service_namespace"] + lDestSvcName, destSvcNameOk := m["destination_service_name"] + lDestWl, destWlOk := m["destination_workload"] + lDestApp, destAppOk := m["destination_app"] + lDestVer, destVerOk := m["destination_version"] + lProtocol, protocolOk := m["request_protocol"] + lCode, codeOk := m["response_code"] + + if !sourceWlNsOk || !sourceWlOk || !sourceAppOk || !sourceVerOk || !destSvcNsOk || !destSvcNameOk || !destWlOk || !destAppOk || !destVerOk || !protocolOk || !codeOk { + log.Warningf("Skipping %s, missing expected TS labels", m.String()) + continue + } + + sourceWlNs := string(lSourceWlNs) + sourceWl := string(lSourceWl) + sourceApp := string(lSourceApp) + sourceVer := string(lSourceVer) + destSvcNs := string(lDestSvcNs) + destSvcName := string(lDestSvcName) + destWl := string(lDestWl) + destApp := string(lDestApp) + destVer := string(lDestVer) + protocol := string(lProtocol) + code := string(lCode) + + val := float64(s.Value) + + if o.InjectServiceNodes { + // don't inject a service node if the dest node is already a service node. Also, we can't inject if destSvcName is not set. 
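+			// When a service node is injected, a single telemetry sample produces two edges: source -> destination service, and destination service -> destination workload.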
+ destSvcNameOk = graph.IsOK(destSvcName) + _, destNodeType := graph.Id(destSvcNs, destWl, destApp, destVer, destSvcName, o.GraphType) + if destSvcNameOk && destNodeType != graph.NodeTypeService { + addTraffic(trafficMap, val, protocol, code, sourceWlNs, sourceWl, sourceApp, sourceVer, "", destSvcNs, "", "", "", destSvcName, o) + addTraffic(trafficMap, val, protocol, code, destSvcNs, "", "", "", destSvcName, destSvcNs, destWl, destApp, destVer, destSvcName, o) + } else { + addTraffic(trafficMap, val, protocol, code, sourceWlNs, sourceWl, sourceApp, sourceVer, "", destSvcNs, destWl, destApp, destVer, destSvcName, o) + } + } else { + addTraffic(trafficMap, val, protocol, code, sourceWlNs, sourceWl, sourceApp, sourceVer, "", destSvcNs, destWl, destApp, destVer, destSvcName, o) + } + } +} + +func addTraffic(trafficMap graph.TrafficMap, val float64, protocol, code, sourceWlNs, sourceWl, sourceApp, sourceVer, sourceSvcName, destSvcNs, destWl, destApp, destVer, destSvcName string, o options.Options) (source, dest *graph.Node) { + source, sourceFound := addNode(trafficMap, sourceWlNs, sourceWl, sourceApp, sourceVer, sourceSvcName, o) + dest, destFound := addNode(trafficMap, destSvcNs, destWl, destApp, destVer, destSvcName, o) + + addToDestServices(dest.Metadata, destSvcName) + + var edge *graph.Edge + for _, e := range source.Edges { + if dest.ID == e.Dest.ID && e.Metadata["protocol"] == protocol { + edge = e + break + } + } + if nil == edge { + edge = source.AddEdge(dest) + edge.Metadata["protocol"] = protocol + } + + // A workload may mistakenly have multiple app and or version label values. + // This is a misconfiguration we need to handle. See Kiali-1309. + if sourceFound { + handleMisconfiguredLabels(source, sourceApp, sourceVer, val, o) + } + if destFound { + handleMisconfiguredLabels(dest, destApp, destVer, val, o) + } + + graph.AddToMetadata(protocol, val, code, source.Metadata, dest.Metadata, edge.Metadata) + + return source, dest +} + +func populateTrafficMapTcp(trafficMap graph.TrafficMap, vector *model.Vector, o options.Options) { + for _, s := range *vector { + m := s.Metric + lSourceWlNs, sourceWlNsOk := m["source_workload_namespace"] + lSourceWl, sourceWlOk := m["source_workload"] + lSourceApp, sourceAppOk := m["source_app"] + lSourceVer, sourceVerOk := m["source_version"] + lDestSvcNs, destSvcNsOk := m["destination_service_namespace"] + lDestSvcName, destSvcNameOk := m["destination_service_name"] + lDestWl, destWlOk := m["destination_workload"] + lDestApp, destAppOk := m["destination_app"] + lDestVer, destVerOk := m["destination_version"] + + // TCP queries doesn't use destination_service_namespace for the unknown node. + // Check if this is the case and use destination_workload_namespace + if !destSvcNsOk { + lDestSvcNs, destSvcNsOk = m["destination_workload_namespace"] + } + + if !sourceWlNsOk || !sourceWlOk || !sourceAppOk || !sourceVerOk || !destSvcNsOk || !destSvcNameOk || !destWlOk || !destAppOk || !destVerOk { + log.Warningf("Skipping %s, missing expected TS labels", m.String()) + continue + } + + sourceWlNs := string(lSourceWlNs) + sourceWl := string(lSourceWl) + sourceApp := string(lSourceApp) + sourceVer := string(lSourceVer) + destSvcNs := string(lDestSvcNs) + destSvcName := string(lDestSvcName) + destWl := string(lDestWl) + destApp := string(lDestApp) + destVer := string(lDestVer) + + val := float64(s.Value) + + if o.InjectServiceNodes { + // don't inject a service node if the dest node is already a service node. Also, we can't inject if destSvcName is not set. 
+ destSvcNameOk = graph.IsOK(destSvcName) + _, destNodeType := graph.Id(destSvcNs, destWl, destApp, destVer, destSvcName, o.GraphType) + if destSvcNameOk && destNodeType != graph.NodeTypeService { + addTcpTraffic(trafficMap, val, sourceWlNs, sourceWl, sourceApp, sourceVer, "", destSvcNs, "", "", "", destSvcName, o) + addTcpTraffic(trafficMap, val, destSvcNs, "", "", "", destSvcName, destSvcNs, destWl, destApp, destVer, destSvcName, o) + } else { + addTcpTraffic(trafficMap, val, sourceWlNs, sourceWl, sourceApp, sourceVer, "", destSvcNs, destWl, destApp, destVer, destSvcName, o) + } + } else { + addTcpTraffic(trafficMap, val, sourceWlNs, sourceWl, sourceApp, sourceVer, "", destSvcNs, destWl, destApp, destVer, destSvcName, o) + } + } +} + +func addTcpTraffic(trafficMap graph.TrafficMap, val float64, sourceWlNs, sourceWl, sourceApp, sourceVer, sourceSvcName, destSvcNs, destWl, destApp, destVer, destSvcName string, o options.Options) (source, dest *graph.Node) { + + source, sourceFound := addNode(trafficMap, sourceWlNs, sourceWl, sourceApp, sourceVer, sourceSvcName, o) + dest, destFound := addNode(trafficMap, destSvcNs, destWl, destApp, destVer, destSvcName, o) + + addToDestServices(dest.Metadata, destSvcName) + + var edge *graph.Edge + for _, e := range source.Edges { + if dest.ID == e.Dest.ID && e.Metadata["protocol"] == "tcp" { + edge = e + break + } + } + if nil == edge { + edge = source.AddEdge(dest) + edge.Metadata["protocol"] = "tcp" + } + + // A workload may mistakenly have multiple app and or version label values. + // This is a misconfiguration we need to handle. See Kiali-1309. + if sourceFound { + handleMisconfiguredLabels(source, sourceApp, sourceVer, val, o) + } + if destFound { + handleMisconfiguredLabels(dest, destApp, destVer, val, o) + } + + graph.AddToMetadata("tcp", val, "", source.Metadata, dest.Metadata, edge.Metadata) + + return source, dest +} + +func addToDestServices(md map[string]interface{}, destService string) { + destServices, ok := md["destServices"] + if !ok { + destServices = make(map[string]bool) + md["destServices"] = destServices + } + destServices.(map[string]bool)[destService] = true +} + +func handleMisconfiguredLabels(node *graph.Node, app, version string, rate float64, o options.Options) { + isVersionedAppGraph := o.VendorOptions.GraphType == graph.GraphTypeVersionedApp + isWorkloadNode := node.NodeType == graph.NodeTypeWorkload + isVersionedAppNode := node.NodeType == graph.NodeTypeApp && isVersionedAppGraph + if isWorkloadNode || isVersionedAppNode { + labels := []string{} + if node.App != app { + labels = append(labels, "app") + } + if node.Version != version { + labels = append(labels, "version") + } + // prefer the labels of an active time series as often the other labels are inactive + if len(labels) > 0 { + node.Metadata["isMisconfigured"] = fmt.Sprintf("labels=%v", labels) + if rate > 0.0 { + node.App = app + node.Version = version + } + } + } +} + +func addNode(trafficMap graph.TrafficMap, namespace, workload, app, version, service string, o options.Options) (*graph.Node, bool) { + id, nodeType := graph.Id(namespace, workload, app, version, service, o.GraphType) + node, found := trafficMap[id] + if !found { + newNode := graph.NewNodeExplicit(id, namespace, workload, app, version, service, nodeType, o.GraphType) + node = &newNode + trafficMap[id] = node + } + return node, found +} + +// GraphNode is a REST http.HandlerFunc handling node-detail graph +// config generation.
+func GraphNode(w http.ResponseWriter, r *http.Request) { + defer handlePanic(w) + + client, err := prometheus.NewClient() + graph.CheckError(err) + + graphNode(w, r, client) +} + +// graphNode provides a testing hook that can supply a mock client +func graphNode(w http.ResponseWriter, r *http.Request, client *prometheus.Client) { + o := options.NewOptions(r) + switch o.Vendor { + case "cytoscape": + default: + graph.Error(fmt.Sprintf("Vendor [%s] not supported", o.Vendor)) + } + if len(o.Namespaces) != 1 { + graph.Error(fmt.Sprintf("Node graph does not support the 'namespaces' query parameter or the 'all' namespace")) + } + + // time how long it takes to generate this graph + promtimer := internalmetrics.GetGraphGenerationTimePrometheusTimer(o.GetGraphKind(), o.GraphType, o.InjectServiceNodes) + defer promtimer.ObserveDuration() + + n := graph.NewNode(o.NodeOptions.Namespace, o.NodeOptions.Workload, o.NodeOptions.App, o.NodeOptions.Version, o.NodeOptions.Service, o.GraphType) + + log.Debugf("Build graph for node [%+v]", n) + + trafficMap := buildNodeTrafficMap(o.NodeOptions.Namespace, n, o, client) + + globalInfo := appender.NewGlobalInfo() + namespaceInfo := appender.NewNamespaceInfo(o.NodeOptions.Namespace) + + for _, a := range o.Appenders { + appenderTimer := internalmetrics.GetGraphAppenderTimePrometheusTimer(a.Name()) + a.AppendGraph(trafficMap, globalInfo, namespaceInfo) + appenderTimer.ObserveDuration() + } + + // The appenders can add/remove/alter nodes. After the manipulations are complete + // we can make some final adjustments: + // - mark the outsiders (i.e. nodes not in the requested namespaces) + // - mark the traffic generators + markOutsideOrInaccessible(trafficMap, o) + markTrafficGenerators(trafficMap) + + // Note that this is where we would call reduceToServiceGraph for graphTypeService but + // the current decision is to not reduce the node graph to provide more detail. This may be + // confusing to users, we'll see... + + generateGraph(trafficMap, w, o) + + // update metrics + internalmetrics.SetGraphNodes(o.GetGraphKind(), o.GraphType, o.InjectServiceNodes, len(trafficMap)) +} + +// buildNodeTrafficMap returns a map of all nodes requesting or requested by the target node (key=id). 
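+// The resulting neighborhood is limited to direct peers: the target node, the nodes that call it, and the nodes it calls (plus any injected service nodes), built from separate inbound and outbound Prometheus queries.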
+func buildNodeTrafficMap(namespace string, n graph.Node, o options.Options, client *prometheus.Client) graph.TrafficMap { + httpMetric := "istio_requests_total" + interval := o.Namespaces[namespace].Duration + + // create map to aggregate traffic by response code + trafficMap := graph.NewTrafficMap() + + // query prometheus for request traffic in two queries: + // 1) query for incoming traffic + var query string + groupBy := "source_workload_namespace,source_workload,source_app,source_version,destination_service_namespace,destination_service_name,destination_workload,destination_app,destination_version,request_protocol,response_code" + switch n.NodeType { + case graph.NodeTypeWorkload: + query = fmt.Sprintf(`sum(rate(%s{reporter="destination",destination_workload_namespace="%s",destination_workload="%s"} [%vs])) by (%s)`, + httpMetric, + namespace, + n.Workload, + int(interval.Seconds()), // range duration for the query + groupBy) + case graph.NodeTypeApp: + if graph.IsOK(n.Version) { + query = fmt.Sprintf(`sum(rate(%s{reporter="destination",destination_service_namespace="%s",destination_app="%s",destination_version="%s"} [%vs])) by (%s)`, + httpMetric, + namespace, + n.App, + n.Version, + int(interval.Seconds()), // range duration for the query + groupBy) + } else { + query = fmt.Sprintf(`sum(rate(%s{reporter="destination",destination_service_namespace="%s",destination_app="%s"} [%vs])) by (%s)`, + httpMetric, + namespace, + n.App, + int(interval.Seconds()), // range duration for the query + groupBy) + } + case graph.NodeTypeService: + // for service requests we want source reporting to capture source-reported errors. But unknown only generates destination telemetry. So + // perform a special query just to capture [successful] request telemetry from unknown. 
+ query = fmt.Sprintf(`sum(rate(%s{reporter="destination",source_workload="unknown",destination_service_namespace="%s",destination_service_name="%s"} [%vs])) by (%s)`, + httpMetric, + namespace, + n.Service, + int(interval.Seconds()), // range duration for the query + groupBy) + vector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMap(trafficMap, &vector, o) + + query = fmt.Sprintf(`sum(rate(%s{reporter="source",destination_service_namespace="%s",destination_service_name="%s"} [%vs])) by (%s)`, + httpMetric, + namespace, + n.Service, + int(interval.Seconds()), // range duration for the query + groupBy) + default: + graph.Error(fmt.Sprintf("NodeType [%s] not supported", n.NodeType)) + } + inVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMap(trafficMap, &inVector, o) + + // 2) query for outbound traffic + switch n.NodeType { + case graph.NodeTypeWorkload: + query = fmt.Sprintf(`sum(rate(%s{reporter="source",source_workload_namespace="%s",source_workload="%s"} [%vs])) by (%s)`, + httpMetric, + namespace, + n.Workload, + int(interval.Seconds()), // range duration for the query + groupBy) + case graph.NodeTypeApp: + if graph.IsOK(n.Version) { + query = fmt.Sprintf(`sum(rate(%s{reporter="source",source_workload_namespace="%s",source_app="%s",source_version="%s"} [%vs])) by (%s)`, + httpMetric, + namespace, + n.App, + n.Version, + int(interval.Seconds()), // range duration for the query + groupBy) + } else { + query = fmt.Sprintf(`sum(rate(%s{reporter="source",source_workload_namespace="%s",source_app="%s"} [%vs])) by (%s)`, + httpMetric, + namespace, + n.App, + int(interval.Seconds()), // range duration for the query + groupBy) + } + case graph.NodeTypeService: + query = "" + default: + graph.Error(fmt.Sprintf("NodeType [%s] not supported", n.NodeType)) + } + outVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMap(trafficMap, &outVector, o) + + // istio component telemetry is only reported destination-side, so we must perform additional queries + if o.IncludeIstio { + istioNamespace := config.Get().IstioNamespace + + // 3) supplemental query for outbound traffic to the istio namespace + switch n.NodeType { + case graph.NodeTypeWorkload: + query = fmt.Sprintf(`sum(rate(%s{reporter="destination",source_workload_namespace="%s",source_workload="%s",destination_service_namespace="%s"} [%vs])) by (%s)`, + httpMetric, + namespace, + n.Workload, + istioNamespace, + int(interval.Seconds()), // range duration for the query + groupBy) + case graph.NodeTypeApp: + if graph.IsOK(n.Version) { + query = fmt.Sprintf(`sum(rate(%s{reporter="destination",source_workload_namespace="%s",source_app="%s",source_version="%s",destination_service_namespace="%s"} [%vs])) by (%s)`, + httpMetric, + namespace, + n.App, + n.Version, + istioNamespace, + int(interval.Seconds()), // range duration for the query + groupBy) + } else { + query = fmt.Sprintf(`sum(rate(%s{reporter="destination",source_workload_namespace="%s",source_app="%s",destination_service_namespace="%s"} [%vs])) by (%s)`, + httpMetric, + namespace, + n.App, + istioNamespace, + int(interval.Seconds()), // range duration for the query + groupBy) + } + case graph.NodeTypeService: + query = fmt.Sprintf(`sum(rate(%s{reporter="destination",destination_service_namespace="%s",destination_service_name="%s"} [%vs])) by (%s)`, + httpMetric, + istioNamespace, + n.Service, + int(interval.Seconds()), // range duration for the query + groupBy) + default: + 
graph.Error(fmt.Sprintf("NodeType [%s] not supported", n.NodeType)) + } + outIstioVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMap(trafficMap, &outIstioVector, o) + } + + // Section for TCP services + tcpMetric := "istio_tcp_sent_bytes_total" + + tcpGroupBy := "source_workload_namespace,source_workload,source_app,source_version,destination_service_namespace,destination_service_name,destination_workload,destination_app,destination_version" + switch n.NodeType { + case graph.NodeTypeWorkload: + query = fmt.Sprintf(`sum(rate(%s{reporter="source",destination_workload_namespace="%s",destination_workload="%s"} [%vs])) by (%s)`, + tcpMetric, + namespace, + n.Workload, + int(interval.Seconds()), // range duration for the query + tcpGroupBy) + case graph.NodeTypeApp: + if graph.IsOK(n.Version) { + query = fmt.Sprintf(`sum(rate(%s{reporter="source",destination_service_namespace="%s",destination_app="%s",destination_version="%s"} [%vs])) by (%s)`, + tcpMetric, + namespace, + n.App, + n.Version, + int(interval.Seconds()), // range duration for the query + tcpGroupBy) + } else { + query = fmt.Sprintf(`sum(rate(%s{reporter="source",destination_service_namespace="%s",destination_app="%s"} [%vs])) by (%s)`, + tcpMetric, + namespace, + n.App, + int(interval.Seconds()), // range duration for the query + tcpGroupBy) + } + case graph.NodeTypeService: + // TODO: Do we need to handle requests from unknown in a special way (like in HTTP above)? Not sure how tcp is reported from unknown. + query = fmt.Sprintf(`sum(rate(%s{reporter="source",destination_service_namespace="%s",destination_service_name="%s"} [%vs])) by (%s)`, + tcpMetric, + namespace, + n.Service, + int(interval.Seconds()), // range duration for the query + tcpGroupBy) + default: + graph.Error(fmt.Sprintf("NodeType [%s] not supported", n.NodeType)) + } + tcpInVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMapTcp(trafficMap, &tcpInVector, o) + + // 2) query for outbound traffic + switch n.NodeType { + case graph.NodeTypeWorkload: + query = fmt.Sprintf(`sum(rate(%s{reporter="source",source_workload_namespace="%s",source_workload="%s"} [%vs])) by (%s)`, + tcpMetric, + namespace, + n.Workload, + int(interval.Seconds()), // range duration for the query + tcpGroupBy) + case graph.NodeTypeApp: + if graph.IsOK(n.Version) { + query = fmt.Sprintf(`sum(rate(%s{reporter="source",source_workload_namespace="%s",source_app="%s",source_version="%s"} [%vs])) by (%s)`, + tcpMetric, + namespace, + n.App, + n.Version, + int(interval.Seconds()), // range duration for the query + tcpGroupBy) + } else { + query = fmt.Sprintf(`sum(rate(%s{reporter="source",source_workload_namespace="%s",source_app="%s"} [%vs])) by (%s)`, + tcpMetric, + namespace, + n.App, + int(interval.Seconds()), // range duration for the query + tcpGroupBy) + } + case graph.NodeTypeService: + query = "" + default: + graph.Error(fmt.Sprintf("NodeType [%s] not supported", n.NodeType)) + } + tcpOutVector := promQuery(query, time.Unix(o.QueryTime, 0), client.API()) + populateTrafficMapTcp(trafficMap, &tcpOutVector, o) + + return trafficMap +} + +func generateGraph(trafficMap graph.TrafficMap, w http.ResponseWriter, o options.Options) { + log.Debugf("Generating config for [%s] service graph...", o.Vendor) + + promtimer := internalmetrics.GetGraphMarshalTimePrometheusTimer(o.GetGraphKind(), o.GraphType, o.InjectServiceNodes) + defer promtimer.ObserveDuration() + + var vendorConfig interface{} + switch o.Vendor { + case "cytoscape": + 
vendorConfig = cytoscape.NewConfig(trafficMap, o.VendorOptions) + default: + graph.Error(fmt.Sprintf("Vendor [%s] not supported", o.Vendor)) + } + + log.Debugf("Done generating config for [%s] service graph.", o.Vendor) + RespondWithJSONIndent(w, http.StatusOK, vendorConfig) +} + +func promQuery(query string, queryTime time.Time, api v1.API) model.Vector { + if "" == query { + return model.Vector{} + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // wrap with a round() to be in line with metrics api + query = fmt.Sprintf("round(%s,0.001)", query) + log.Debugf("Graph query:\n%s@time=%v (now=%v, %v)\n", query, queryTime.Format(graph.TF), time.Now().Format(graph.TF), queryTime.Unix()) + + promtimer := internalmetrics.GetPrometheusProcessingTimePrometheusTimer("Graph-Generation") + value, err := api.Query(ctx, query, queryTime) + graph.CheckError(err) + promtimer.ObserveDuration() // notice we only collect metrics for successful prom queries + + switch t := value.Type(); t { + case model.ValVector: // Instant Vector + return value.(model.Vector) + default: + graph.Error(fmt.Sprintf("No handling for type %v!\n", t)) + } + + return nil +} + +func handlePanic(w http.ResponseWriter) { + code := http.StatusInternalServerError + if r := recover(); r != nil { + var message string + switch r.(type) { + case string: + message = r.(string) + case error: + message = r.(error).Error() + case func() string: + message = r.(func() string)() + case graph.Response: + message = r.(graph.Response).Message + code = r.(graph.Response).Code + default: + message = fmt.Sprintf("%v", r) + } + if code == http.StatusInternalServerError { + log.Errorf("%s: %s", message, debug.Stack()) + } + RespondWithError(w, code, message) + } +} + +// some debugging utils +//func ids(r *[]graph.Node) []string { +// s := []string{} +// for _, r := range *r { +// s = append(s, r.ID) +// } +// return s +//} + +//func keys(m map[string]*graph.Node) []string { +// s := []string{} +// for k := range m { +// s = append(s, k) +// } +// return s +//} diff --git a/vendor/github.com/kiali/kiali/handlers/health.go b/vendor/github.com/kiali/kiali/handlers/health.go new file mode 100644 index 000000000..8e6e88f0b --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/health.go @@ -0,0 +1,260 @@ +package handlers + +import ( + "net/http" + "time" + + "github.com/gorilla/mux" + "k8s.io/apimachinery/pkg/api/errors" + + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/util" +) + +const defaultHealthRateInterval = "10m" + +// NamespaceHealth is the API handler to get app-based health of every services in the given namespace +func NamespaceHealth(w http.ResponseWriter, r *http.Request) { + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + + p := namespaceHealthParams{} + if ok, err := p.extract(r); !ok { + // Bad request + RespondWithError(w, http.StatusBadRequest, err) + return + } + + // Adjust rate interval + rateInterval, err := adjustRateInterval(business, p.Namespace, p.RateInterval, p.QueryTime) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Adjust rate interval error: "+err.Error()) + return + } + + switch p.Type { + case "app": + health, err := business.Health.GetNamespaceAppHealth(p.Namespace, rateInterval, p.QueryTime) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Error 
while fetching app health: "+err.Error()) + return + } + RespondWithJSON(w, http.StatusOK, health) + case "service": + health, err := business.Health.GetNamespaceServiceHealth(p.Namespace, rateInterval, p.QueryTime) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Error while fetching service health: "+err.Error()) + return + } + RespondWithJSON(w, http.StatusOK, health) + case "workload": + health, err := business.Health.GetNamespaceWorkloadHealth(p.Namespace, rateInterval, p.QueryTime) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Error while fetching workload health: "+err.Error()) + return + } + RespondWithJSON(w, http.StatusOK, health) + } +} + +// AppHealth is the API handler to get health of a single app +func AppHealth(w http.ResponseWriter, r *http.Request) { + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + + p := appHealthParams{} + p.extract(r) + rateInterval, err := adjustRateInterval(business, p.Namespace, p.RateInterval, p.QueryTime) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Adjust rate interval error: "+err.Error()) + return + } + + health, err := business.Health.GetAppHealth(p.Namespace, p.App, rateInterval, p.QueryTime) + handleHealthResponse(w, health, err) +} + +// WorkloadHealth is the API handler to get health of a single workload +func WorkloadHealth(w http.ResponseWriter, r *http.Request) { + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + + p := workloadHealthParams{} + p.extract(r) + rateInterval, err := adjustRateInterval(business, p.Namespace, p.RateInterval, p.QueryTime) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Adjust rate interval error: "+err.Error()) + return + } + p.RateInterval = rateInterval + + health, err := business.Health.GetWorkloadHealth(p.Namespace, p.Workload, rateInterval, p.QueryTime) + handleHealthResponse(w, health, err) +} + +// ServiceHealth is the API handler to get health of a single service +func ServiceHealth(w http.ResponseWriter, r *http.Request) { + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + + p := serviceHealthParams{} + p.extract(r) + rateInterval, err := adjustRateInterval(business, p.Namespace, p.RateInterval, p.QueryTime) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Adjust rate interval error: "+err.Error()) + return + } + + health, err := business.Health.GetServiceHealth(p.Namespace, p.Service, rateInterval, p.QueryTime) + handleHealthResponse(w, health, err) +} + +func handleHealthResponse(w http.ResponseWriter, health interface{}, err error) { + if err != nil { + if errors.IsNotFound(err) { + RespondWithError(w, http.StatusNotFound, err.Error()) + } else if statusError, isStatus := err.(*errors.StatusError); isStatus { + RespondWithError(w, http.StatusInternalServerError, statusError.ErrStatus.Message) + } else { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + } + } else { + RespondWithJSON(w, http.StatusOK, health) + } +} + +type baseHealthParams struct { + // The namespace scope + // + // in: path + Namespace string `json:"namespace"` + // The rate interval used for fetching error rate + // + // in: query + // 
default: 10m + RateInterval string `json:"rateInterval"` + + // The time to use for the prometheus query + QueryTime time.Time +} + +func (p *baseHealthParams) baseExtract(r *http.Request, vars map[string]string) { + p.RateInterval = defaultHealthRateInterval + p.QueryTime = util.Clock.Now() + queryParams := r.URL.Query() + if rateIntervals, ok := queryParams["rateInterval"]; ok && len(rateIntervals) > 0 { + p.RateInterval = rateIntervals[0] + } + p.Namespace = vars["namespace"] +} + +// namespaceHealthParams holds the path and query parameters for NamespaceHealth +// +// swagger:parameters namespaceHealth +type namespaceHealthParams struct { + baseHealthParams + // The type of health, "app", "service" or "workload". + // + // in: query + // pattern: ^(app|service|workload)$ + // default: app + Type string `json:"type"` +} + +func (p *namespaceHealthParams) extract(r *http.Request) (bool, string) { + vars := mux.Vars(r) + p.baseExtract(r, vars) + p.Type = "app" + queryParams := r.URL.Query() + if healthTypes, ok := queryParams["type"]; ok && len(healthTypes) > 0 { + if healthTypes[0] != "app" && healthTypes[0] != "service" && healthTypes[0] != "workload" { + // Bad request + return false, "Bad request, query parameter 'type' must be one of ['app','service','workload']" + } + p.Type = healthTypes[0] + } + return true, "" +} + +// appHealthParams holds the path and query parameters for AppHealth +// +// swagger:parameters appHealth +type appHealthParams struct { + baseHealthParams + // The target app + // + // in: path + App string `json:"app"` +} + +func (p *appHealthParams) extract(r *http.Request) { + vars := mux.Vars(r) + p.baseExtract(r, vars) + p.App = vars["app"] +} + +// serviceHealthParams holds the path and query parameters for ServiceHealth +// +// swagger:parameters serviceHealth +type serviceHealthParams struct { + baseHealthParams + // The target service + // + // in: path + Service string `json:"service"` +} + +func (p *serviceHealthParams) extract(r *http.Request) { + vars := mux.Vars(r) + p.baseExtract(r, vars) + p.Service = vars["service"] +} + +// workloadHealthParams holds the path and query parameters for WorkloadHealth +// +// swagger:parameters workloadHealth +type workloadHealthParams struct { + baseHealthParams + // The target workload + // + // in: path + Workload string `json:"workload"` +} + +func (p *workloadHealthParams) extract(r *http.Request) { + vars := mux.Vars(r) + p.baseExtract(r, vars) + p.Workload = vars["workload"] +} + +func adjustRateInterval(business *business.Layer, namespace, rateInterval string, queryTime time.Time) (string, error) { + namespaceInfo, err := business.Namespace.GetNamespace(namespace) + if err != nil { + return "", err + } + interval, err := util.AdjustRateInterval(namespaceInfo.CreationTimestamp, queryTime, rateInterval) + if err != nil { + return "", err + } + + if interval != rateInterval { + log.Debugf("Rate interval for namespace %v was adjusted to %v (original = %v, query time = %v, namespace created = %v)", + namespace, interval, rateInterval, queryTime, namespaceInfo.CreationTimestamp) + } + + return interval, nil +} diff --git a/vendor/github.com/kiali/kiali/handlers/istio_config.go b/vendor/github.com/kiali/kiali/handlers/istio_config.go new file mode 100644 index 000000000..0ba192040 --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/istio_config.go @@ -0,0 +1,350 @@ +package handlers + +import ( + "io/ioutil" + "net/http" + "strings" + "sync" + + "github.com/gorilla/mux" + + "github.com/kiali/kiali/business" + 
"github.com/kiali/kiali/config" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" + "k8s.io/apimachinery/pkg/api/errors" +) + +func IstioConfigList(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + namespace := params["namespace"] + query := r.URL.Query() + objects := "" + parsedTypes := make([]string, 0) + if _, ok := query["objects"]; ok { + objects = strings.ToLower(query.Get("objects")) + if len(objects) > 0 { + parsedTypes = strings.Split(objects, ",") + } + } + + includeValidations := false + if _, found := query["validate"]; found { + includeValidations = true + } + + criteria := parseCriteria(namespace, objects) + + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + + var istioConfigValidations models.IstioValidations + + wg := sync.WaitGroup{} + if includeValidations { + wg.Add(1) + go func(namespace string, istioConfigValidations *models.IstioValidations, err *error) { + defer wg.Done() + // We don't filter by objects when calling validations, because certain validations require fetching all types to get the correct errors + istioConfigValidationResults, errValidations := business.Validations.GetValidations(namespace, "") + if errValidations != nil && err == nil { + *err = errValidations + } else { + if len(parsedTypes) > 0 { + istioConfigValidationResults = istioConfigValidationResults.FilterByTypes(parsedTypes) + } + *istioConfigValidations = istioConfigValidationResults + } + }(namespace, &istioConfigValidations, &err) + } + + istioConfig, err := business.IstioConfig.GetIstioConfigList(criteria) + if includeValidations { + // Add validation results to the IstioConfigList once they're available (previously done in the UI layer) + wg.Wait() + istioConfig.IstioValidations = istioConfigValidations + } + + if err != nil { + log.Error(err) + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + + RespondWithJSON(w, http.StatusOK, istioConfig) +} + +func checkType(types []string, name string) bool { + for _, typeName := range types { + if typeName == name { + return true + } + } + return false +} + +func parseCriteria(namespace string, objects string) business.IstioConfigCriteria { + defaultInclude := objects == "" + criteria := business.IstioConfigCriteria{} + criteria.Namespace = namespace + criteria.IncludeGateways = defaultInclude + criteria.IncludeVirtualServices = defaultInclude + criteria.IncludeDestinationRules = defaultInclude + criteria.IncludeServiceEntries = defaultInclude + criteria.IncludeRules = defaultInclude + criteria.IncludeAdapters = defaultInclude + criteria.IncludeTemplates = defaultInclude + criteria.IncludeQuotaSpecs = defaultInclude + criteria.IncludeQuotaSpecBindings = defaultInclude + criteria.IncludePolicies = defaultInclude + criteria.IncludeMeshPolicies = defaultInclude + criteria.IncludeClusterRbacConfigs = defaultInclude + criteria.IncludeServiceRoles = defaultInclude + criteria.IncludeServiceRoleBindings = defaultInclude + + if defaultInclude { + return criteria + } + + types := strings.Split(objects, ",") + if checkType(types, business.Gateways) { + criteria.IncludeGateways = true + } + if checkType(types, business.VirtualServices) { + criteria.IncludeVirtualServices = true + } + if checkType(types, business.DestinationRules) { + criteria.IncludeDestinationRules = true + } + if checkType(types, business.ServiceEntries) { + criteria.IncludeServiceEntries = 
true + } + if checkType(types, business.Rules) { + criteria.IncludeRules = true + } + if checkType(types, business.Adapters) { + criteria.IncludeAdapters = true + } + if checkType(types, business.Templates) { + criteria.IncludeTemplates = true + } + if checkType(types, business.QuotaSpecs) { + criteria.IncludeQuotaSpecs = true + } + if checkType(types, business.QuotaSpecBindings) { + criteria.IncludeQuotaSpecBindings = true + } + if checkType(types, business.Policies) { + criteria.IncludePolicies = true + } + if checkType(types, business.MeshPolicies) { + criteria.IncludeMeshPolicies = true + } + if checkType(types, business.ClusterRbacConfigs) { + criteria.IncludeClusterRbacConfigs = true + } + if checkType(types, business.ServiceRoles) { + criteria.IncludeServiceRoles = true + } + if checkType(types, business.ServiceRoleBindings) { + criteria.IncludeServiceRoleBindings = true + } + return criteria +} + +func IstioConfigDetails(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + namespace := params["namespace"] + objectType := params["object_type"] + objectSubtype := params["object_subtype"] + object := params["object"] + + includeValidations := false + query := r.URL.Query() + if _, found := query["validate"]; found { + includeValidations = true + } + + if !checkObjectType(objectType) { + RespondWithError(w, http.StatusBadRequest, "Object type not managed: "+objectType) + return + } + + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + + var istioConfigValidations models.IstioValidations + + wg := sync.WaitGroup{} + if includeValidations { + wg.Add(1) + go func(istioConfigValidations *models.IstioValidations, err *error) { + defer wg.Done() + istioConfigValidationResults, errValidations := business.Validations.GetIstioObjectValidations(namespace, objectType, object) + if errValidations != nil && err == nil { + *err = errValidations + } else { + *istioConfigValidations = istioConfigValidationResults + } + }(&istioConfigValidations, &err) + } + + istioConfigDetails, err := business.IstioConfig.GetIstioConfigDetails(namespace, objectType, objectSubtype, object) + + if includeValidations && err == nil { + wg.Wait() + + if validation, found := istioConfigValidations[models.IstioValidationKey{ObjectType: models.ObjectTypeSingular[objectType], Name: object}]; found { + istioConfigDetails.IstioValidation = validation + } + } + + if errors.IsNotFound(err) { + RespondWithError(w, http.StatusNotFound, err.Error()) + return + } else if statusError, isStatus := err.(*errors.StatusError); isStatus { + RespondWithError(w, http.StatusInternalServerError, statusError.ErrStatus.Message) + return + } else if err != nil { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + + RespondWithJSON(w, http.StatusOK, istioConfigDetails) +} + +func IstioConfigDelete(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + namespace := params["namespace"] + objectType := params["object_type"] + objectSubtype := params["object_subtype"] + object := params["object"] + + api := business.GetIstioAPI(objectType) + if api == "" { + RespondWithError(w, http.StatusBadRequest, "Object type not managed: "+objectType) + return + } + + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + err = 
business.IstioConfig.DeleteIstioConfigDetail(api, namespace, objectType, objectSubtype, object) + if err != nil { + log.Error(err) + if errors.IsNotFound(err) { + RespondWithError(w, http.StatusNotFound, err.Error()) + } else { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + } + } else { + audit(r, "DELETE on Namespace: "+namespace+" Type: "+objectType+" Subtype: "+objectSubtype+" Name: "+object) + RespondWithCode(w, http.StatusOK) + } + return +} + +func IstioConfigUpdate(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + namespace := params["namespace"] + objectType := params["object_type"] + objectSubtype := params["object_subtype"] + object := params["object"] + + api := business.GetIstioAPI(objectType) + if api == "" { + RespondWithError(w, http.StatusBadRequest, "Object type not managed: "+objectType) + return + } + + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + + body, err := ioutil.ReadAll(r.Body) + if err != nil { + RespondWithError(w, http.StatusBadRequest, "Update request with bad update patch: "+err.Error()) + } + jsonPatch := string(body) + updatedConfigDetails, err := business.IstioConfig.UpdateIstioConfigDetail(api, namespace, objectType, objectSubtype, object, jsonPatch) + + if errors.IsNotFound(err) { + RespondWithError(w, http.StatusNotFound, err.Error()) + return + } else if statusError, isStatus := err.(*errors.StatusError); isStatus { + RespondWithError(w, http.StatusInternalServerError, statusError.ErrStatus.Message) + return + } else if err != nil { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + + audit(r, "UPDATE on Namespace: "+namespace+" Type: "+objectType+" Subtype: "+objectSubtype+" Name: "+object+" Patch: "+jsonPatch) + RespondWithJSON(w, http.StatusOK, updatedConfigDetails) +} + +func IstioConfigCreate(w http.ResponseWriter, r *http.Request) { + // Feels kinda replicated for multiple functions.. 
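+ // The create flow below mirrors the update and delete handlers: resolve the path
+ // parameters, map the object type to its Istio API group, read the request body,
+ // delegate to the business layer, then audit the action and write the response.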
+ params := mux.Vars(r) + namespace := params["namespace"] + objectType := params["object_type"] + objectSubtype := params["object_subtype"] + + api := business.GetIstioAPI(objectType) + if api == "" { + RespondWithError(w, http.StatusBadRequest, "Object type not managed: "+objectType) + return + } + + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + + body, err := ioutil.ReadAll(r.Body) + if err != nil { + RespondWithError(w, http.StatusBadRequest, "Create request could not be read: "+err.Error()) + } + + createdConfigDetails, err := business.IstioConfig.CreateIstioConfigDetail(api, namespace, objectType, objectSubtype, body) + if errors.IsNotFound(err) { + RespondWithError(w, http.StatusNotFound, err.Error()) + return + } else if statusError, isStatus := err.(*errors.StatusError); isStatus { + RespondWithError(w, http.StatusInternalServerError, statusError.ErrStatus.Message) + return + } else if err != nil { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + + audit(r, "CREATE on Namespace: "+namespace+" Type: "+objectType+" Subtype: "+objectSubtype+" Object: "+string(body)) + RespondWithJSON(w, http.StatusOK, createdConfigDetails) +} + +func checkObjectType(objectType string) bool { + return business.GetIstioAPI(objectType) != "" +} + +func audit(r *http.Request, message string) { + if config.Get().Server.AuditLog { + user := r.Header.Get("Kiali-User") + log.Infof("AUDIT User [%s] Msg [%s]", user, message) + } +} diff --git a/vendor/github.com/kiali/kiali/handlers/jaeger.go b/vendor/github.com/kiali/kiali/handlers/jaeger.go new file mode 100644 index 000000000..7556014f3 --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/jaeger.go @@ -0,0 +1,31 @@ +package handlers + +import ( + "net/http" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/models" +) + +// Get JaegerInfo provides the proxy Jaeger URL +func GetJaegerInfo(w http.ResponseWriter, r *http.Request) { + jaegerConfig := config.Get().ExternalServices.Jaeger + info := models.JaegerInfo{ + URL: jaegerConfig.URL, + } + + // Check if URL is in the configuration + if info.URL == "" { + RespondWithError(w, http.StatusNotFound, "You need to set the Jaeger URL configuration.") + return + } + + // Check if URL is valid + _, error := validateURL(info.URL) + if error != nil { + RespondWithError(w, http.StatusNotAcceptable, "You need to set a correct format for Jaeger URL in the configuration error: "+error.Error()) + return + } + + RespondWithJSON(w, http.StatusOK, info) +} diff --git a/vendor/github.com/kiali/kiali/handlers/metrics.go b/vendor/github.com/kiali/kiali/handlers/metrics.go new file mode 100644 index 000000000..9eb6f286a --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/metrics.go @@ -0,0 +1,150 @@ +package handlers + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus" + "github.com/kiali/kiali/util" +) + +func extractIstioMetricsQueryParams(r *http.Request, q *prometheus.IstioMetricsQuery, namespaceInfo *models.Namespace) error { + q.FillDefaults() + queryParams := r.URL.Query() + if filters, ok := queryParams["filters[]"]; ok && len(filters) > 0 { + q.Filters = filters + } + dir := queryParams.Get("direction") + if dir != "" { + if dir != "inbound" && dir != "outbound" { + return errors.New("Bad 
request, query parameter 'direction' must be either 'inbound' or 'outbound'") + } + q.Direction = dir + } + requestProtocol := queryParams.Get("requestProtocol") + if requestProtocol != "" { + q.RequestProtocol = requestProtocol + } + reporter := queryParams.Get("reporter") + if reporter != "" { + if reporter != "source" && reporter != "destination" { + return errors.New("Bad request, query parameter 'reporter' must be either 'source' or 'destination'") + } + q.Reporter = reporter + } + return extractBaseMetricsQueryParams(queryParams, &q.BaseMetricsQuery, namespaceInfo) +} + +func extractCustomMetricsQueryParams(r *http.Request, q *prometheus.CustomMetricsQuery, namespaceInfo *models.Namespace) error { + q.FillDefaults() + queryParams := r.URL.Query() + q.Version = queryParams.Get("version") + op := queryParams.Get("rawDataAggregator") + // Explicit white-listing operators to prevent any kind of injection + // For a list of operators, see https://prometheus.io/docs/prometheus/latest/querying/operators/#aggregation-operators + if op == "sum" || op == "min" || op == "max" || op == "avg" || op == "stddev" || op == "stdvar" { + q.RawDataAggregator = op + } + return extractBaseMetricsQueryParams(queryParams, &q.BaseMetricsQuery, namespaceInfo) +} + +func extractBaseMetricsQueryParams(queryParams url.Values, q *prometheus.BaseMetricsQuery, namespaceInfo *models.Namespace) error { + if rateIntervals, ok := queryParams["rateInterval"]; ok && len(rateIntervals) > 0 { + // Only first is taken into consideration + q.RateInterval = rateIntervals[0] + } + if rateFuncs, ok := queryParams["rateFunc"]; ok && len(rateFuncs) > 0 { + // Only first is taken into consideration + if rateFuncs[0] != "rate" && rateFuncs[0] != "irate" { + // Bad request + return errors.New("Bad request, query parameter 'rateFunc' must be either 'rate' or 'irate'") + } + q.RateFunc = rateFuncs[0] + } + if queryTimes, ok := queryParams["queryTime"]; ok && len(queryTimes) > 0 { + if num, err := strconv.ParseInt(queryTimes[0], 10, 64); err == nil { + q.End = time.Unix(num, 0) + } else { + // Bad request + return errors.New("Bad request, cannot parse query parameter 'queryTime'") + } + } + if durations, ok := queryParams["duration"]; ok && len(durations) > 0 { + if num, err := strconv.ParseInt(durations[0], 10, 64); err == nil { + duration := time.Duration(num) * time.Second + q.Start = q.End.Add(-duration) + } else { + // Bad request + return errors.New("Bad request, cannot parse query parameter 'duration'") + } + } + if steps, ok := queryParams["step"]; ok && len(steps) > 0 { + if num, err := strconv.Atoi(steps[0]); err == nil { + q.Step = time.Duration(num) * time.Second + } else { + // Bad request + return errors.New("Bad request, cannot parse query parameter 'step'") + } + } + if quantiles, ok := queryParams["quantiles[]"]; ok && len(quantiles) > 0 { + for _, quantile := range quantiles { + f, err := strconv.ParseFloat(quantile, 64) + if err != nil { + // Non parseable quantile + return errors.New("Bad request, cannot parse query parameter 'quantiles', float expected") + } + if f < 0 || f > 1 { + return errors.New("Bad request, invalid quantile(s): should be between 0 and 1") + } + } + q.Quantiles = quantiles + } + if avgFlags, ok := queryParams["avg"]; ok && len(avgFlags) > 0 { + if avgFlag, err := strconv.ParseBool(avgFlags[0]); err == nil { + q.Avg = avgFlag + } else { + // Bad request + return errors.New("Bad request, cannot parse query parameter 'avg'") + } + } + if lbls, ok := queryParams["byLabels[]"]; ok && len(lbls) > 0 
{ + q.ByLabels = lbls + } + + // If needed, adjust interval -- Make sure query won't fetch data before the namespace creation + intervalStartTime, err := util.GetStartTimeForRateInterval(q.End, q.RateInterval) + if err != nil { + return err + } + if intervalStartTime.Before(namespaceInfo.CreationTimestamp) { + q.RateInterval = fmt.Sprintf("%ds", int(q.End.Sub(namespaceInfo.CreationTimestamp).Seconds())) + intervalStartTime = namespaceInfo.CreationTimestamp + log.Debugf("[extractMetricsQueryParams] Interval set to: %v", q.RateInterval) + } + // If needed, adjust query start time (bound to namespace creation time) + log.Debugf("[extractMetricsQueryParams] Requested query start time: %v", q.Start) + intervalDuration := q.End.Sub(intervalStartTime) + allowedStart := namespaceInfo.CreationTimestamp.Add(intervalDuration) + if q.Start.Before(allowedStart) { + q.Start = allowedStart + log.Debugf("[extractMetricsQueryParams] Query start time set to: %v", q.Start) + + if q.Start.After(q.End) { + // This means that the query range does not fall in the range + // of life of the namespace. So, there are no metrics to query. + log.Debugf("[extractMetricsQueryParams] Query end time = %v; not querying metrics.", q.End) + return errors.New("After checks, query start time is after end time") + } + } + + // Adjust start & end times to be a multiple of step + stepInSecs := int64(q.Step.Seconds()) + q.Start = time.Unix((q.Start.Unix()/stepInSecs)*stepInSecs, 0) + return nil +} diff --git a/vendor/github.com/kiali/kiali/handlers/namespaces.go b/vendor/github.com/kiali/kiali/handlers/namespaces.go new file mode 100644 index 000000000..5d8448f2a --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/namespaces.go @@ -0,0 +1,58 @@ +package handlers + +import ( + "net/http" + + "github.com/gorilla/mux" + + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/prometheus" +) + +func NamespaceList(w http.ResponseWriter, r *http.Request) { + + business, err := business.Get() + if err != nil { + log.Error(err) + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + + namespaces, err := business.Namespace.GetNamespaces() + if err != nil { + log.Error(err) + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + + RespondWithJSON(w, http.StatusOK, namespaces) +} + +// NamespaceMetrics is the API handler to fetch metrics to be displayed, related to all +// services in the namespace +func NamespaceMetrics(w http.ResponseWriter, r *http.Request) { + getNamespaceMetrics(w, r, defaultPromClientSupplier, defaultK8SClientSupplier) +} + +// getServiceMetrics (mock-friendly version) +func getNamespaceMetrics(w http.ResponseWriter, r *http.Request, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { + vars := mux.Vars(r) + namespace := vars["namespace"] + + prom, _, namespaceInfo := initClientsForMetrics(w, promSupplier, k8sSupplier, namespace) + if prom == nil { + // any returned value nil means error & response already written + return + } + + params := prometheus.IstioMetricsQuery{Namespace: namespace} + err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo) + if err != nil { + RespondWithError(w, http.StatusBadRequest, err.Error()) + return + } + + metrics := prom.GetMetrics(¶ms) + RespondWithJSON(w, http.StatusOK, metrics) +} diff --git a/vendor/github.com/kiali/kiali/handlers/root.go b/vendor/github.com/kiali/kiali/handlers/root.go new file mode 100644 index 000000000..d15bdcf53 --- /dev/null +++ 
b/vendor/github.com/kiali/kiali/handlers/root.go @@ -0,0 +1,30 @@ +package handlers + +import ( + "net/http" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/status" +) + +func Root(w http.ResponseWriter, r *http.Request) { + getStatus(w, r) +} + +func getStatus(w http.ResponseWriter, r *http.Request) { + RespondWithJSONIndent(w, http.StatusOK, status.Get()) +} + +func GetToken(w http.ResponseWriter, r *http.Request) { + u, _, ok := r.BasicAuth() + if !ok { + RespondWithJSONIndent(w, http.StatusInternalServerError, u) + return + } + token, error := config.GenerateToken(u) + if error != nil { + RespondWithJSONIndent(w, http.StatusInternalServerError, error) + return + } + RespondWithJSONIndent(w, http.StatusOK, token) +} diff --git a/vendor/github.com/kiali/kiali/handlers/services.go b/vendor/github.com/kiali/kiali/handlers/services.go new file mode 100644 index 000000000..eeb0aae27 --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/services.go @@ -0,0 +1,159 @@ +package handlers + +import ( + "net/http" + "sync" + + "github.com/gorilla/mux" + "k8s.io/apimachinery/pkg/api/errors" + + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus" + "github.com/kiali/kiali/util" +) + +// ServiceList is the API handler to fetch the list of services in a given namespace +func ServiceList(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + namespace := params["namespace"] + + // Fetch and build services + serviceList, err := business.Svc.GetServiceList(namespace) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + + RespondWithJSON(w, http.StatusOK, serviceList) +} + +// ServiceMetrics is the API handler to fetch metrics to be displayed, related to a single service +func ServiceMetrics(w http.ResponseWriter, r *http.Request) { + getServiceMetrics(w, r, defaultPromClientSupplier, defaultK8SClientSupplier) +} + +// getServiceMetrics (mock-friendly version) +func getServiceMetrics(w http.ResponseWriter, r *http.Request, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { + vars := mux.Vars(r) + namespace := vars["namespace"] + service := vars["service"] + + prom, _, namespaceInfo := initClientsForMetrics(w, promSupplier, k8sSupplier, namespace) + if prom == nil { + // any returned value nil means error & response already written + return + } + + params := prometheus.IstioMetricsQuery{Namespace: namespace, Service: service} + err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo) + if err != nil { + RespondWithError(w, http.StatusBadRequest, err.Error()) + return + } + + metrics := prom.GetMetrics(¶ms) + RespondWithJSON(w, http.StatusOK, metrics) +} + +// ServiceDetails is the API handler to fetch full details of an specific service +func ServiceDetails(w http.ResponseWriter, r *http.Request) { + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Services initialization error: "+err.Error()) + return + } + + // Rate interval is needed to fetch request rates based health + rateInterval := defaultHealthRateInterval + queryParams := r.URL.Query() + if rateIntervals, ok := queryParams["rateInterval"]; ok && len(rateIntervals) > 0 { + rateInterval = rateIntervals[0] + } + + includeValidations 
:= false + if _, found := queryParams["validate"]; found { + includeValidations = true + } + + params := mux.Vars(r) + namespace := params["namespace"] + service := params["service"] + queryTime := util.Clock.Now() + rateInterval, err = adjustRateInterval(business, namespace, rateInterval, queryTime) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Adjust rate interval error: "+err.Error()) + return + } + + var istioConfigValidations = models.IstioValidations{} + + wg := sync.WaitGroup{} + if includeValidations { + wg.Add(1) + go func(istioConfigValidations *models.IstioValidations, err *error) { + defer wg.Done() + istioConfigValidationResults, errValidations := business.Validations.GetValidations(namespace, service) + if errValidations != nil && err == nil { + *err = errValidations + } else { + *istioConfigValidations = istioConfigValidationResults + } + }(&istioConfigValidations, &err) + } + + serviceDetails, err := business.Svc.GetService(namespace, service, rateInterval, queryTime) + if includeValidations && err == nil { + wg.Wait() + serviceDetails.Validations = istioConfigValidations + } + + if err != nil { + if errors.IsNotFound(err) { + RespondWithError(w, http.StatusNotFound, err.Error()) + } else if statusError, isStatus := err.(*errors.StatusError); isStatus { + RespondWithError(w, http.StatusInternalServerError, statusError.ErrStatus.Message) + } else { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + } + return + } + + RespondWithJSON(w, http.StatusOK, serviceDetails) +} + +// ServiceDashboard is the API handler to fetch Istio dashboard, related to a single service +func ServiceDashboard(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + namespace := vars["namespace"] + service := vars["service"] + + prom, _, namespaceInfo := initClientsForMetrics(w, defaultPromClientSupplier, defaultK8SClientSupplier, namespace) + if prom == nil { + // any returned value nil means error & response already written + return + } + + params := prometheus.IstioMetricsQuery{Namespace: namespace, Service: service} + err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo) + if err != nil { + RespondWithError(w, http.StatusBadRequest, err.Error()) + return + } + + svc := business.NewDashboardsService(nil, prom) + dashboard, err := svc.GetIstioDashboard(params) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + RespondWithJSON(w, http.StatusOK, dashboard) +} diff --git a/vendor/github.com/kiali/kiali/handlers/utils.go b/vendor/github.com/kiali/kiali/handlers/utils.go new file mode 100644 index 000000000..4e4fa1437 --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/utils.go @@ -0,0 +1,70 @@ +package handlers + +import ( + "net/http" + "net/url" + + "k8s.io/api/core/v1" + + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/models" + "github.com/kiali/kiali/prometheus" +) + +type promClientSupplier func() (*prometheus.Client, error) +type k8sClientSupplier func() (kubernetes.IstioClientInterface, error) + +var defaultPromClientSupplier = prometheus.NewClient +var defaultK8SClientSupplier = func() (kubernetes.IstioClientInterface, error) { + return kubernetes.NewClient() +} + +func getService(namespace string, service string) (*v1.ServiceSpec, error) { + client, err := kubernetes.NewClient() + if err != nil { + return nil, err + } + svc, err := client.GetService(namespace, service) + if err != nil { + return 
nil, err + } + return &svc.Spec, nil +} + +func validateURL(serviceURL string) (*url.URL, error) { + return url.ParseRequestURI(serviceURL) +} + +func checkNamespaceAccess(w http.ResponseWriter, k8s kubernetes.IstioClientInterface, prom prometheus.ClientInterface, namespace string) *models.Namespace { + layer := business.NewWithBackends(k8s, prom) + + if nsInfo, err := layer.Namespace.GetNamespace(namespace); err != nil { + RespondWithError(w, http.StatusForbidden, "Cannot access namespace data: "+err.Error()) + return nil + } else { + return nsInfo + } +} + +func initClientsForMetrics(w http.ResponseWriter, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier, namespace string) (*prometheus.Client, kubernetes.IstioClientInterface, *models.Namespace) { + k8s, err := k8sSupplier() + if err != nil { + log.Error(err) + RespondWithError(w, http.StatusServiceUnavailable, "Kubernetes client error: "+err.Error()) + return nil, nil, nil + } + prom, err := promSupplier() + if err != nil { + log.Error(err) + RespondWithError(w, http.StatusServiceUnavailable, "Prometheus client error: "+err.Error()) + return nil, nil, nil + } + + nsInfo := checkNamespaceAccess(w, k8s, prom, namespace) + if nsInfo == nil { + return nil, nil, nil + } + return prom, k8s, nsInfo +} diff --git a/vendor/github.com/kiali/kiali/handlers/workloads.go b/vendor/github.com/kiali/kiali/handlers/workloads.go new file mode 100644 index 000000000..564dd5caa --- /dev/null +++ b/vendor/github.com/kiali/kiali/handlers/workloads.go @@ -0,0 +1,116 @@ +package handlers + +import ( + "net/http" + + "github.com/gorilla/mux" + "k8s.io/apimachinery/pkg/api/errors" + + "github.com/kiali/kiali/business" + "github.com/kiali/kiali/prometheus" +) + +// WorkloadList is the API handler to fetch all the workloads to be displayed, related to a single namespace +func WorkloadList(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Workloads initialization error: "+err.Error()) + return + } + namespace := params["namespace"] + + // Fetch and build workloads + workloadList, err := business.Workload.GetWorkloadList(namespace) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + + RespondWithJSON(w, http.StatusOK, workloadList) +} + +// WorkloadDetails is the API handler to fetch all details to be displayed, related to a single workload +func WorkloadDetails(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + + // Get business layer + business, err := business.Get() + if err != nil { + RespondWithError(w, http.StatusInternalServerError, "Workloads initialization error: "+err.Error()) + return + } + namespace := params["namespace"] + workload := params["workload"] + + // Fetch and build workload + workloadDetails, err := business.Workload.GetWorkload(namespace, workload, true) + if err != nil { + if errors.IsNotFound(err) { + RespondWithError(w, http.StatusNotFound, err.Error()) + } else { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + } + return + } + + RespondWithJSON(w, http.StatusOK, workloadDetails) +} + +// WorkloadMetrics is the API handler to fetch metrics to be displayed, related to a single workload +func WorkloadMetrics(w http.ResponseWriter, r *http.Request) { + getWorkloadMetrics(w, r, defaultPromClientSupplier, defaultK8SClientSupplier) +} + +// getWorkloadMetrics (mock-friendly version) +func 
getWorkloadMetrics(w http.ResponseWriter, r *http.Request, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { + vars := mux.Vars(r) + namespace := vars["namespace"] + workload := vars["workload"] + + prom, _, namespaceInfo := initClientsForMetrics(w, promSupplier, k8sSupplier, namespace) + if prom == nil { + // any returned value nil means error & response already written + return + } + + params := prometheus.IstioMetricsQuery{Namespace: namespace, Workload: workload} + err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo) + if err != nil { + RespondWithError(w, http.StatusBadRequest, err.Error()) + return + } + + metrics := prom.GetMetrics(¶ms) + RespondWithJSON(w, http.StatusOK, metrics) +} + +// WorkloadDashboard is the API handler to fetch Istio dashboard, related to a single workload +func WorkloadDashboard(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + namespace := vars["namespace"] + workload := vars["workload"] + + prom, _, namespaceInfo := initClientsForMetrics(w, defaultPromClientSupplier, defaultK8SClientSupplier, namespace) + if prom == nil { + // any returned value nil means error & response already written + return + } + + params := prometheus.IstioMetricsQuery{Namespace: namespace, Workload: workload} + err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo) + if err != nil { + RespondWithError(w, http.StatusBadRequest, err.Error()) + return + } + + svc := business.NewDashboardsService(nil, prom) + dashboard, err := svc.GetIstioDashboard(params) + if err != nil { + RespondWithError(w, http.StatusInternalServerError, err.Error()) + return + } + RespondWithJSON(w, http.StatusOK, dashboard) +} diff --git a/vendor/github.com/kiali/kiali/kubernetes/cache.go b/vendor/github.com/kiali/kiali/kubernetes/cache.go new file mode 100644 index 000000000..3055baae9 --- /dev/null +++ b/vendor/github.com/kiali/kiali/kubernetes/cache.go @@ -0,0 +1,459 @@ +package kubernetes + +import ( + "errors" + "fmt" + "sync" + "time" + + "k8s.io/api/apps/v1beta1" + "k8s.io/api/apps/v1beta2" + batch_v1 "k8s.io/api/batch/v1" + batch_v1beta1 "k8s.io/api/batch/v1beta1" + "k8s.io/api/core/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/informers" + kube "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + + "github.com/kiali/kiali/log" +) + +type ( + // Inspired/reused from istio code: + // https://github.com/istio/istio/blob/master/mixer/adapter/kubernetesenv/cache.go + cacheController interface { + // Control Cache + Start() + HasSynced() bool + WaitForSync() bool + Stop() + + // Business methods + GetCronJobs(namespace string) ([]batch_v1beta1.CronJob, error) + GetDeployment(namespace string, name string) (*v1beta1.Deployment, error) + GetDeployments(namespace string) ([]v1beta1.Deployment, error) + GetEndpoints(namespace, name string) (*v1.Endpoints, error) + GetJobs(namespace string) ([]batch_v1.Job, error) + GetPods(namespace string) ([]v1.Pod, error) + GetReplicationControllers(namespace string) ([]v1.ReplicationController, error) + GetReplicaSets(namespace string) ([]v1beta2.ReplicaSet, error) + GetService(namespace string, name string) (*v1.Service, error) + GetServices(namespace string) ([]v1.Service, error) + GetStatefulSet(namespace string, name string) (*v1beta2.StatefulSet, error) + GetStatefulSets(namespace string) ([]v1beta2.StatefulSet, error) + } + + controllerImpl struct { + clientset kube.Interface + refreshDuration time.Duration + stopChan chan struct{} + syncCount int + maxSyncCount int + isErrorState 
bool + lastError error + lastErrorLock sync.Mutex + controllers map[string]cache.SharedIndexInformer + } +) + +var ( + lastCacheErrorLock sync.Mutex + errorCallbacks []func(error) +) + +func init() { + setupErrorHandlers() + errorCallbacks = make([]func(error), 0) +} + +func setupErrorHandlers() { + nErrFunc := len(utilruntime.ErrorHandlers) + customErrorHandler := make([]func(error), nErrFunc+1) + for i, errorFunc := range utilruntime.ErrorHandlers { + customErrorHandler[i] = errorFunc + } + customErrorHandler[nErrFunc] = func(err error) { + for _, callback := range errorCallbacks { + callback(err) + } + } + utilruntime.ErrorHandlers = customErrorHandler +} + +func registerErrorCallback(callback func(error)) { + defer lastCacheErrorLock.Unlock() + lastCacheErrorLock.Lock() + errorCallbacks = append(errorCallbacks, callback) +} + +func newCacheController(clientset kube.Interface, refreshDuration time.Duration) cacheController { + newControllerImpl := controllerImpl{ + clientset: clientset, + refreshDuration: refreshDuration, + stopChan: nil, + controllers: initControllers(clientset, refreshDuration), + syncCount: 0, + maxSyncCount: 20, // Move this to config ? or this constant is good enough ? + } + registerErrorCallback(newControllerImpl.ErrorCallback) + + return &newControllerImpl +} + +func initControllers(clientset kube.Interface, refreshDuration time.Duration) map[string]cache.SharedIndexInformer { + sharedInformers := informers.NewSharedInformerFactory(clientset, refreshDuration) + controllers := make(map[string]cache.SharedIndexInformer) + controllers["Pod"] = sharedInformers.Core().V1().Pods().Informer() + controllers["ReplicationController"] = sharedInformers.Core().V1().ReplicationControllers().Informer() + controllers["Deployment"] = sharedInformers.Apps().V1beta1().Deployments().Informer() + controllers["ReplicaSet"] = sharedInformers.Apps().V1beta2().ReplicaSets().Informer() + controllers["StatefulSet"] = sharedInformers.Apps().V1beta2().StatefulSets().Informer() + controllers["Job"] = sharedInformers.Batch().V1().Jobs().Informer() + controllers["CronJob"] = sharedInformers.Batch().V1beta1().CronJobs().Informer() + controllers["Service"] = sharedInformers.Core().V1().Services().Informer() + controllers["Endpoints"] = sharedInformers.Core().V1().Endpoints().Informer() + + return controllers +} + +func (c *controllerImpl) Start() { + if c.stopChan == nil { + c.stopChan = make(chan struct{}) + go c.run(c.stopChan) + log.Infof("K8S cache started") + } else { + log.Warningf("K8S cache is already running") + } +} + +func (c *controllerImpl) run(stop <-chan struct{}) { + for _, cn := range c.controllers { + go cn.Run(stop) + } + <-stop + log.Infof("K8S cache stopped") +} + +func (c *controllerImpl) HasSynced() bool { + if c.syncCount > c.maxSyncCount { + log.Errorf("Max attempts reached syncing cache. 
Error connecting to k8s API: %d > %d", c.syncCount, c.maxSyncCount) + c.Stop() + return false + } + hasSynced := true + for _, cn := range c.controllers { + hasSynced = hasSynced && cn.HasSynced() + } + if hasSynced { + c.syncCount = 0 + } else { + c.syncCount = c.syncCount + 1 + } + return hasSynced +} + +func (c *controllerImpl) WaitForSync() bool { + return cache.WaitForCacheSync(c.stopChan, c.HasSynced) +} + +func (c *controllerImpl) Stop() { + if c.stopChan != nil { + close(c.stopChan) + c.stopChan = nil + } +} + +func (c *controllerImpl) ErrorCallback(err error) { + if c.isErrorState == false { + log.Warningf("Error callback received: %s", err) + c.lastErrorLock.Lock() + c.isErrorState = true + c.lastError = err + c.lastErrorLock.Unlock() + c.Stop() + } +} + +func (c *controllerImpl) checkStateAndRetry() error { + if c.isErrorState == false { + return nil + } + + // Retry of the cache is hold by one single goroutine + c.lastErrorLock.Lock() + if c.isErrorState == true { + // ping to check if backend is still unavailable (used namespace endpoint) + _, err := c.clientset.CoreV1().Namespaces().List(emptyListOptions) + if err != nil { + c.lastError = fmt.Errorf("Error retrying to connect to K8S API backend. %s", err) + } else { + c.lastError = nil + c.isErrorState = false + c.Start() + c.WaitForSync() + } + } + c.lastErrorLock.Unlock() + return c.lastError +} + +func (c *controllerImpl) GetCronJobs(namespace string) ([]batch_v1beta1.CronJob, error) { + if err := c.checkStateAndRetry(); err != nil { + return []batch_v1beta1.CronJob{}, err + } + indexer := c.controllers["CronJob"].GetIndexer() + cronjobs, err := indexer.ByIndex("namespace", namespace) + if err != nil { + return []batch_v1beta1.CronJob{}, err + } + if len(cronjobs) > 0 { + _, ok := cronjobs[0].(*batch_v1beta1.CronJob) + if !ok { + return []batch_v1beta1.CronJob{}, errors.New("Bad CronJob type found in cache") + } + nsCronjobs := make([]batch_v1beta1.CronJob, len(cronjobs)) + for i, cronjob := range cronjobs { + nsCronjobs[i] = *(cronjob.(*batch_v1beta1.CronJob)) + } + return nsCronjobs, nil + } + return []batch_v1beta1.CronJob{}, nil +} + +func (c *controllerImpl) GetDeployment(namespace, name string) (*v1beta1.Deployment, error) { + if err := c.checkStateAndRetry(); err != nil { + return nil, err + } + indexer := c.controllers["Deployment"].GetIndexer() + deps, exist, err := indexer.GetByKey(namespace + "/" + name) + if err != nil { + return nil, err + } + if exist { + dep, ok := deps.(*v1beta1.Deployment) + if !ok { + return nil, errors.New("Bad Deployment type found in cache") + } + return dep, nil + } + return nil, NewNotFound(name, "apps/v1beta1", "Deployment") +} + +func (c *controllerImpl) GetDeployments(namespace string) ([]v1beta1.Deployment, error) { + if err := c.checkStateAndRetry(); err != nil { + return []v1beta1.Deployment{}, err + } + indexer := c.controllers["Deployment"].GetIndexer() + deps, err := indexer.ByIndex("namespace", namespace) + if err != nil { + return []v1beta1.Deployment{}, err + } + if len(deps) > 0 { + _, ok := deps[0].(*v1beta1.Deployment) + if !ok { + return nil, errors.New("Bad Deployment type found in cache") + } + nsDeps := make([]v1beta1.Deployment, len(deps)) + for i, dep := range deps { + nsDeps[i] = *(dep.(*v1beta1.Deployment)) + } + return nsDeps, nil + } + return []v1beta1.Deployment{}, nil +} + +func (c *controllerImpl) GetEndpoints(namespace, name string) (*v1.Endpoints, error) { + if err := c.checkStateAndRetry(); err != nil { + return nil, err + } + indexer := 
c.controllers["Endpoints"].GetIndexer() + endpoints, exist, err := indexer.GetByKey(namespace + "/" + name) + if err != nil { + return nil, err + } + if exist { + endpoint, ok := endpoints.(*v1.Endpoints) + if !ok { + return nil, errors.New("Bad Endpoints type found in cache") + } + return endpoint, nil + } + return nil, NewNotFound(name, "core/v1", "Endpoints") +} + +func (c *controllerImpl) GetJobs(namespace string) ([]batch_v1.Job, error) { + if err := c.checkStateAndRetry(); err != nil { + return []batch_v1.Job{}, err + } + indexer := c.controllers["Job"].GetIndexer() + jobs, err := indexer.ByIndex("namespace", namespace) + if err != nil { + return []batch_v1.Job{}, err + } + if len(jobs) > 0 { + _, ok := jobs[0].(*batch_v1.Job) + if !ok { + return []batch_v1.Job{}, errors.New("Bad Job type found in cache") + } + nsJobs := make([]batch_v1.Job, len(jobs)) + for i, job := range jobs { + nsJobs[i] = *(job.(*batch_v1.Job)) + } + return nsJobs, nil + } + return []batch_v1.Job{}, nil +} + +func (c *controllerImpl) GetPods(namespace string) ([]v1.Pod, error) { + if err := c.checkStateAndRetry(); err != nil { + return []v1.Pod{}, err + } + indexer := c.controllers["Pod"].GetIndexer() + pods, err := indexer.ByIndex("namespace", namespace) + if err != nil { + return []v1.Pod{}, err + } + if len(pods) > 0 { + _, ok := pods[0].(*v1.Pod) + if !ok { + return []v1.Pod{}, errors.New("Bad Pod type found in cache") + } + nsPods := make([]v1.Pod, len(pods)) + for i, pod := range pods { + nsPods[i] = *(pod.(*v1.Pod)) + } + return nsPods, nil + } + return []v1.Pod{}, nil +} + +func (c *controllerImpl) GetReplicationControllers(namespace string) ([]v1.ReplicationController, error) { + if err := c.checkStateAndRetry(); err != nil { + return []v1.ReplicationController{}, err + } + indexer := c.controllers["ReplicationController"].GetIndexer() + repcons, err := indexer.ByIndex("namespace", namespace) + if err != nil { + return []v1.ReplicationController{}, err + } + if len(repcons) > 0 { + _, ok := repcons[0].(*v1.ReplicationController) + if !ok { + return []v1.ReplicationController{}, errors.New("Bad ReplicationController type found in cache") + } + nsRepcons := make([]v1.ReplicationController, len(repcons)) + for i, repcon := range repcons { + nsRepcons[i] = *(repcon.(*v1.ReplicationController)) + } + return nsRepcons, nil + } + return []v1.ReplicationController{}, nil +} + +func (c *controllerImpl) GetReplicaSets(namespace string) ([]v1beta2.ReplicaSet, error) { + if err := c.checkStateAndRetry(); err != nil { + return []v1beta2.ReplicaSet{}, err + } + indexer := c.controllers["ReplicaSet"].GetIndexer() + repsets, err := indexer.ByIndex("namespace", namespace) + if err != nil { + return []v1beta2.ReplicaSet{}, err + } + if len(repsets) > 0 { + _, ok := repsets[0].(*v1beta2.ReplicaSet) + if !ok { + return []v1beta2.ReplicaSet{}, errors.New("Bad ReplicaSet type found in cache") + } + nsRepsets := make([]v1beta2.ReplicaSet, len(repsets)) + for i, repset := range repsets { + nsRepsets[i] = *(repset.(*v1beta2.ReplicaSet)) + } + return nsRepsets, nil + } + return []v1beta2.ReplicaSet{}, nil +} + +func (c *controllerImpl) GetStatefulSet(namespace, name string) (*v1beta2.StatefulSet, error) { + if err := c.checkStateAndRetry(); err != nil { + return nil, err + } + indexer := c.controllers["StatefulSet"].GetIndexer() + fulsets, exist, err := indexer.GetByKey(namespace + "/" + name) + if err != nil { + return nil, err + } + if exist { + fulset, ok := fulsets.(*v1beta2.StatefulSet) + if !ok { + return nil, 
errors.New("Bad StatefulSet type found in cache") + } + return fulset, nil + } + return nil, NewNotFound(name, "apps/v1beta2", "StatefulSet") +} + +func (c *controllerImpl) GetStatefulSets(namespace string) ([]v1beta2.StatefulSet, error) { + if err := c.checkStateAndRetry(); err != nil { + return []v1beta2.StatefulSet{}, err + } + indexer := c.controllers["StatefulSet"].GetIndexer() + fulsets, err := indexer.ByIndex("namespace", namespace) + if err != nil { + return []v1beta2.StatefulSet{}, err + } + if len(fulsets) > 0 { + _, ok := fulsets[0].(*v1beta2.StatefulSet) + if !ok { + return []v1beta2.StatefulSet{}, errors.New("Bad StatefulSet type found in cache") + } + nsFulsets := make([]v1beta2.StatefulSet, len(fulsets)) + for i, fulset := range fulsets { + nsFulsets[i] = *(fulset.(*v1beta2.StatefulSet)) + } + return nsFulsets, nil + } + return []v1beta2.StatefulSet{}, nil +} + +func (c *controllerImpl) GetService(namespace, name string) (*v1.Service, error) { + if err := c.checkStateAndRetry(); err != nil { + return nil, err + } + indexer := c.controllers["Service"].GetIndexer() + services, exist, err := indexer.GetByKey(namespace + "/" + name) + if err != nil { + return nil, err + } + if exist { + service, ok := services.(*v1.Service) + if !ok { + return nil, errors.New("Bad Service type found in cache") + } + return service, nil + } + return nil, NewNotFound(name, "core/v1", "Service") +} + +func (c *controllerImpl) GetServices(namespace string) ([]v1.Service, error) { + if err := c.checkStateAndRetry(); err != nil { + return []v1.Service{}, err + } + indexer := c.controllers["Service"].GetIndexer() + services, err := indexer.ByIndex("namespace", namespace) + if err != nil { + return []v1.Service{}, err + } + if len(services) > 0 { + _, ok := services[0].(*v1.Service) + if !ok { + return []v1.Service{}, errors.New("Bad Service type found in cache") + } + nsServices := make([]v1.Service, len(services)) + for i, service := range services { + nsServices[i] = *(service.(*v1.Service)) + } + return nsServices, nil + } + return []v1.Service{}, nil +} diff --git a/vendor/github.com/kiali/kiali/kubernetes/client.go b/vendor/github.com/kiali/kiali/kubernetes/client.go new file mode 100644 index 000000000..c3fc929d1 --- /dev/null +++ b/vendor/github.com/kiali/kiali/kubernetes/client.go @@ -0,0 +1,293 @@ +package kubernetes + +import ( + "errors" + "fmt" + "net" + "os" + "time" + + "k8s.io/api/apps/v1beta1" + "k8s.io/api/apps/v1beta2" + auth_v1 "k8s.io/api/authorization/v1" + batch_v1 "k8s.io/api/batch/v1" + batch_v1beta1 "k8s.io/api/batch/v1beta1" + v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + kube "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + kialiConfig "github.com/kiali/kiali/config" + "github.com/kiali/kiali/log" + + osappsv1 "github.com/openshift/api/apps/v1" + osv1 "github.com/openshift/api/project/v1" +) + +var ( + emptyListOptions = meta_v1.ListOptions{} + emptyGetOptions = meta_v1.GetOptions{} +) + +// IstioClientInterface for mocks (only mocked function are necessary here) +type IstioClientInterface interface { + CreateIstioObject(api, namespace, resourceType, json string) (IstioObject, error) + DeleteIstioObject(api, namespace, resourceType, name string) error + GetAdapter(namespace, adapterType, adapterName string) (IstioObject, error) + GetAdapters(namespace string) ([]IstioObject, error) + GetCronJobs(namespace string) 
([]batch_v1beta1.CronJob, error) + GetDeployment(namespace string, deploymentName string) (*v1beta1.Deployment, error) + GetDeployments(namespace string) ([]v1beta1.Deployment, error) + GetDeploymentConfig(namespace string, deploymentconfigName string) (*osappsv1.DeploymentConfig, error) + GetDeploymentConfigs(namespace string) ([]osappsv1.DeploymentConfig, error) + GetDestinationRule(namespace string, destinationrule string) (IstioObject, error) + GetDestinationRules(namespace string, serviceName string) ([]IstioObject, error) + GetEndpoints(namespace string, serviceName string) (*v1.Endpoints, error) + GetGateway(namespace string, gateway string) (IstioObject, error) + GetGateways(namespace string) ([]IstioObject, error) + GetIstioDetails(namespace string, serviceName string) (*IstioDetails, error) + GetIstioRule(namespace string, istiorule string) (IstioObject, error) + GetIstioRules(namespace string) ([]IstioObject, error) + GetJobs(namespace string) ([]batch_v1.Job, error) + GetNamespace(namespace string) (*v1.Namespace, error) + GetNamespaces() ([]v1.Namespace, error) + GetPods(namespace, labelSelector string) ([]v1.Pod, error) + GetProject(project string) (*osv1.Project, error) + GetProjects() ([]osv1.Project, error) + GetQuotaSpec(namespace string, quotaSpecName string) (IstioObject, error) + GetQuotaSpecs(namespace string) ([]IstioObject, error) + GetQuotaSpecBinding(namespace string, quotaSpecBindingName string) (IstioObject, error) + GetQuotaSpecBindings(namespace string) ([]IstioObject, error) + GetReplicationControllers(namespace string) ([]v1.ReplicationController, error) + GetReplicaSets(namespace string) ([]v1beta2.ReplicaSet, error) + GetSelfSubjectAccessReview(namespace, api, resourceType string, verbs []string) ([]*auth_v1.SelfSubjectAccessReview, error) + GetService(namespace string, serviceName string) (*v1.Service, error) + GetServices(namespace string, selectorLabels map[string]string) ([]v1.Service, error) + GetServiceEntries(namespace string) ([]IstioObject, error) + GetServiceEntry(namespace string, serviceEntryName string) (IstioObject, error) + GetStatefulSet(namespace string, statefulsetName string) (*v1beta2.StatefulSet, error) + GetStatefulSets(namespace string) ([]v1beta2.StatefulSet, error) + GetTemplate(namespace, templateType, templateName string) (IstioObject, error) + GetTemplates(namespace string) ([]IstioObject, error) + GetPolicy(namespace string, policyName string) (IstioObject, error) + GetPolicies(namespace string) ([]IstioObject, error) + GetMeshPolicy(namespace string, policyName string) (IstioObject, error) + GetMeshPolicies(namespace string) ([]IstioObject, error) + GetClusterRbacConfig(namespace string, name string) (IstioObject, error) + GetClusterRbacConfigs(namespace string) ([]IstioObject, error) + GetServiceRole(namespace string, name string) (IstioObject, error) + GetServiceRoles(namespace string) ([]IstioObject, error) + GetServiceRoleBinding(namespace string, name string) (IstioObject, error) + GetServiceRoleBindings(namespace string) ([]IstioObject, error) + GetVirtualService(namespace string, virtualservice string) (IstioObject, error) + GetVirtualServices(namespace string, serviceName string) ([]IstioObject, error) + IsOpenShift() bool + Stop() + UpdateIstioObject(api, namespace, resourceType, name, jsonPatch string) (IstioObject, error) +} + +// IstioClient is the client struct for Kubernetes and Istio APIs +// It hides the way it queries each API +type IstioClient struct { + IstioClientInterface + k8s *kube.Clientset + 
istioConfigApi *rest.RESTClient + istioNetworkingApi *rest.RESTClient + istioAuthenticationApi *rest.RESTClient + istioRbacApi *rest.RESTClient + // isOpenShift private variable will check if kiali is deployed under an OpenShift cluster or not + // It is represented as a pointer to include the initialization phase. + // See kubernetes_service.go#IsOpenShift() for more details. + isOpenShift *bool + // Cache controller is a global cache for all k8s objects fetched by kiali in multiple namespaces. + // It doesn't support reduced permissions scenarios yet, don't forget to disabled on those use cases. + k8sCache cacheController + stopCache chan struct{} +} + +// GetK8sApi returns the clientset referencing all K8s rest clients +func (client *IstioClient) GetK8sApi() *kube.Clientset { + return client.k8s +} + +// GetIstioConfigApi returns the istio config rest client +func (client *IstioClient) GetIstioConfigApi() *rest.RESTClient { + return client.istioConfigApi +} + +// GetIstioNetworkingApi returns the istio config rest client +func (client *IstioClient) GetIstioNetworkingApi() *rest.RESTClient { + return client.istioNetworkingApi +} + +// GetIstioRbacApi returns the istio rbac rest client +func (client *IstioClient) GetIstioRbacApi() *rest.RESTClient { + return client.istioRbacApi +} + +// ConfigClient return a client with the correct configuration +// Returns configuration if Kiali is in Cluster when InCluster is true +// Returns configuration if Kiali is not int Cluster when InCluster is false +// It returns an error on any problem +func ConfigClient() (*rest.Config, error) { + if kialiConfig.Get().InCluster { + incluster, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + incluster.QPS = kialiConfig.Get().KubernetesConfig.QPS + incluster.Burst = kialiConfig.Get().KubernetesConfig.Burst + return incluster, nil + } + host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") + if len(host) == 0 || len(port) == 0 { + return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") + } + return &rest.Config{ + // TODO: switch to using cluster DNS. + Host: "http://" + net.JoinHostPort(host, port), + QPS: kialiConfig.Get().KubernetesConfig.QPS, + Burst: kialiConfig.Get().KubernetesConfig.Burst, + }, nil +} + +// NewClient creates a new client to the Kubernetes and Istio APIs. +func NewClient() (*IstioClient, error) { + config, err := ConfigClient() + + if err != nil { + return nil, err + } + + return NewClientFromConfig(config) +} + +// NewClientFromConfig creates a new client to the Kubernetes and Istio APIs. +// It takes the assumption that Istio is deployed into the cluster. +// It hides the access to Kubernetes/Openshift credentials. +// It hides the low level use of the API of Kubernetes and Istio, it should be considered as an implementation detail. +// It returns an error on any problem. 
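+// Typical call site, as in NewClient above (sketch only):
+//   cfg, err := ConfigClient()
+//   if err != nil {
+//       return nil, err
+//   }
+//   return NewClientFromConfig(cfg)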
+func NewClientFromConfig(config *rest.Config) (*IstioClient, error) { + client := IstioClient{} + log.Debugf("Rest perf config QPS: %f Burst: %d", config.QPS, config.Burst) + + k8s, err := kube.NewForConfig(config) + if err != nil { + return nil, err + } + client.k8s = k8s + + // Init client cache + // Note that cache will work only in full permissions scenarios (similar permissions as mixer/istio-telemetry component) + kialiK8sCfg := kialiConfig.Get().KubernetesConfig + if client.k8sCache == nil && kialiK8sCfg.CacheEnabled { + log.Infof("Kiali K8S Cache enabled") + client.stopCache = make(chan struct{}) + client.k8sCache = newCacheController(client.k8s, time.Duration(kialiConfig.Get().KubernetesConfig.CacheDuration)) + client.k8sCache.Start() + if !client.k8sCache.WaitForSync() { + return nil, errors.New("Cache cannot connect with the k8s API on host: " + config.Host) + } + } + + // Istio is a CRD extension of Kubernetes API, so any custom type should be registered here. + // KnownTypes registers the Istio objects we use, as soon as we get more info we will increase the number of types. + types := runtime.NewScheme() + schemeBuilder := runtime.NewSchemeBuilder( + func(scheme *runtime.Scheme) error { + // Register networking types + for _, nt := range networkingTypes { + scheme.AddKnownTypeWithName(NetworkingGroupVersion.WithKind(nt.objectKind), &GenericIstioObject{}) + scheme.AddKnownTypeWithName(NetworkingGroupVersion.WithKind(nt.collectionKind), &GenericIstioObjectList{}) + } + // Register config types + for _, cf := range configTypes { + scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(cf.objectKind), &GenericIstioObject{}) + scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(cf.collectionKind), &GenericIstioObjectList{}) + } + // Register adapter types + for _, ad := range adapterTypes { + scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(ad.objectKind), &GenericIstioObject{}) + scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(ad.collectionKind), &GenericIstioObjectList{}) + } + // Register template types + for _, tp := range templateTypes { + scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(tp.objectKind), &GenericIstioObject{}) + scheme.AddKnownTypeWithName(ConfigGroupVersion.WithKind(tp.collectionKind), &GenericIstioObjectList{}) + } + // Register authentication types + for _, at := range authenticationTypes { + scheme.AddKnownTypeWithName(AuthenticationGroupVersion.WithKind(at.objectKind), &GenericIstioObject{}) + scheme.AddKnownTypeWithName(AuthenticationGroupVersion.WithKind(at.collectionKind), &GenericIstioObjectList{}) + } + // Register rbac types + for _, rt := range rbacTypes { + scheme.AddKnownTypeWithName(RbacGroupVersion.WithKind(rt.objectKind), &GenericIstioObject{}) + scheme.AddKnownTypeWithName(RbacGroupVersion.WithKind(rt.collectionKind), &GenericIstioObjectList{}) + + } + meta_v1.AddToGroupVersion(scheme, ConfigGroupVersion) + meta_v1.AddToGroupVersion(scheme, NetworkingGroupVersion) + meta_v1.AddToGroupVersion(scheme, AuthenticationGroupVersion) + meta_v1.AddToGroupVersion(scheme, RbacGroupVersion) + return nil + }) + + err = schemeBuilder.AddToScheme(types) + if err != nil { + return nil, err + } + + // Istio needs another type as it queries a different K8S API. 
+	istioConfigAPI, err := newClientForAPI(config, ConfigGroupVersion, types)
+	if err != nil {
+		return nil, err
+	}
+
+	istioNetworkingAPI, err := newClientForAPI(config, NetworkingGroupVersion, types)
+	if err != nil {
+		return nil, err
+	}
+
+	istioAuthenticationAPI, err := newClientForAPI(config, AuthenticationGroupVersion, types)
+	if err != nil {
+		return nil, err
+	}
+
+	istioRbacApi, err := newClientForAPI(config, RbacGroupVersion, types)
+	if err != nil {
+		return nil, err
+	}
+
+	client.istioConfigApi = istioConfigAPI
+	client.istioNetworkingApi = istioNetworkingAPI
+	client.istioAuthenticationApi = istioAuthenticationAPI
+	client.istioRbacApi = istioRbacApi
+	return &client, nil
+}
+
+func newClientForAPI(fromCfg *rest.Config, groupVersion schema.GroupVersion, scheme *runtime.Scheme) (*rest.RESTClient, error) {
+	cfg := rest.Config{
+		Host:    fromCfg.Host,
+		APIPath: "/apis",
+		ContentConfig: rest.ContentConfig{
+			GroupVersion:         &groupVersion,
+			NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: serializer.NewCodecFactory(scheme)},
+			ContentType:          runtime.ContentTypeJSON,
+		},
+		BearerToken:     fromCfg.BearerToken,
+		TLSClientConfig: fromCfg.TLSClientConfig,
+		QPS:             fromCfg.QPS,
+		Burst:           fromCfg.Burst,
+	}
+	return rest.RESTClientFor(&cfg)
+}
+
+func (in *IstioClient) Stop() {
+	if in.k8sCache != nil {
+		in.k8sCache.Stop()
+	}
+}
diff --git a/vendor/github.com/kiali/kiali/kubernetes/filters.go b/vendor/github.com/kiali/kiali/kubernetes/filters.go
new file mode 100644
index 000000000..2d711cc87
--- /dev/null
+++ b/vendor/github.com/kiali/kiali/kubernetes/filters.go
@@ -0,0 +1,70 @@
+package kubernetes
+
+import (
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/labels"
+)
+
+// FilterPodsForService returns the subset of the pod list filtered according to the service selector
+func FilterPodsForService(s *v1.Service, allPods []v1.Pod) []v1.Pod {
+	if s == nil || allPods == nil {
+		return nil
+	}
+	serviceSelector := labels.Set(s.Spec.Selector).AsSelector()
+	pods := FilterPodsForSelector(serviceSelector, allPods)
+
+	return pods
+}
+
+func FilterPodsForSelector(selector labels.Selector, allPods []v1.Pod) []v1.Pod {
+	var pods []v1.Pod
+	for _, pod := range allPods {
+		if selector.Matches(labels.Set(pod.ObjectMeta.Labels)) {
+			pods = append(pods, pod)
+		}
+	}
+	return pods
+}
+
+// FilterPodsForEndpoints performs a second pass, as the selector may return too much data.
+// This case happens when a "nil" selector (such as the one of the default/kubernetes service) is used.
+func FilterPodsForEndpoints(endpoints *v1.Endpoints, unfiltered []v1.Pod) []v1.Pod {
+	endpointPods := make(map[string]bool)
+	for _, subset := range endpoints.Subsets {
+		for _, address := range subset.Addresses {
+			if address.TargetRef != nil && address.TargetRef.Kind == "Pod" {
+				endpointPods[address.TargetRef.Name] = true
+			}
+		}
+	}
+	var pods []v1.Pod
+	for _, pod := range unfiltered {
+		if _, ok := endpointPods[pod.Name]; ok {
+			pods = append(pods, pod)
+		}
+	}
+	return pods
+}
+
+func FilterPodsForController(controllerName string, controllerType string, allPods []v1.Pod) []v1.Pod {
+	var pods []v1.Pod
+	for _, pod := range allPods {
+		for _, ref := range pod.OwnerReferences {
+			if ref.Controller != nil && *ref.Controller && ref.Name == controllerName && ref.Kind == controllerType {
+				pods = append(pods, pod)
+				break
+			}
+		}
+	}
+	return pods
+}
+
+func FilterServicesForSelector(selector labels.Selector, allServices []v1.Service) []v1.Service {
+	var services []v1.Service
+	for _, svc := range allServices {
+		if
selector.Matches(labels.Set(svc.Spec.Selector)) { + services = append(services, svc) + } + } + return services +} diff --git a/vendor/github.com/kiali/kiali/kubernetes/istio_details_service.go b/vendor/github.com/kiali/kiali/kubernetes/istio_details_service.go new file mode 100644 index 000000000..e08e1a0a2 --- /dev/null +++ b/vendor/github.com/kiali/kiali/kubernetes/istio_details_service.go @@ -0,0 +1,728 @@ +package kubernetes + +import ( + "fmt" + "regexp" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/log" +) + +var portNameMatcher = regexp.MustCompile("^[\\-].*") + +// GetIstioDetails returns Istio details for a given namespace, +// on this version it collects the VirtualServices and DestinationRules defined for a namespace. +// If serviceName param is provided, it filters all the Istio objects pointing to a particular service. +// It returns an error on any problem. +func (in *IstioClient) GetIstioDetails(namespace string, serviceName string) (*IstioDetails, error) { + + wg := sync.WaitGroup{} + errChan := make(chan error, 4) + + istioDetails := IstioDetails{} + vss := make([]IstioObject, 0) + drs := make([]IstioObject, 0) + gws := make([]IstioObject, 0) + ses := make([]IstioObject, 0) + + wg.Add(4) + go fetchNoEntry(&ses, namespace, in.GetServiceEntries, &wg, errChan) + go fetchNoEntry(&gws, namespace, in.GetGateways, &wg, errChan) + go fetch(&vss, namespace, serviceName, in.GetVirtualServices, &wg, errChan) + go fetch(&drs, namespace, serviceName, in.GetDestinationRules, &wg, errChan) + wg.Wait() + + if len(errChan) != 0 { + // We return first error only, likely to be the same issue for all + err := <-errChan + return nil, err + } + + istioDetails.VirtualServices = vss + istioDetails.DestinationRules = drs + istioDetails.Gateways = gws + istioDetails.ServiceEntries = ses + + return &istioDetails, nil +} + +// CreateIstioObject creates an Istio object +func (in *IstioClient) CreateIstioObject(api, namespace, resourceType, json string) (IstioObject, error) { + var result runtime.Object + var err error + + byteJson := []byte(json) + if api == ConfigGroupVersion.Group { + result, err = in.istioConfigApi.Post().Namespace(namespace).Resource(resourceType).Body(byteJson).Do().Get() + } else if api == NetworkingGroupVersion.Group { + result, err = in.istioNetworkingApi.Post().Namespace(namespace).Resource(resourceType).Body(byteJson).Do().Get() + } else if api == AuthenticationGroupVersion.Group { + result, err = in.istioAuthenticationApi.Post().Namespace(namespace).Resource(resourceType).Body(byteJson).Do().Get() + } else { + result, err = in.istioRbacApi.Post().Namespace(namespace).Resource(resourceType).Body(byteJson).Do().Get() + } + + if err != nil { + return nil, err + } + + istioObject, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s/%s doesn't return an IstioObject object", namespace, resourceType) + } + return istioObject, err +} + +// DeleteIstioObject deletes an Istio object from either config api or networking api +func (in *IstioClient) DeleteIstioObject(api, namespace, resourceType, name string) error { + log.Debugf("DeleteIstioObject input: %s / %s / %s / %s", api, namespace, resourceType, name) + var err error + if api == ConfigGroupVersion.Group { + _, err = in.istioConfigApi.Delete().Namespace(namespace).Resource(resourceType).Name(name).Do().Get() + } else if api == NetworkingGroupVersion.Group { + _, err = 
in.istioNetworkingApi.Delete().Namespace(namespace).Resource(resourceType).Name(name).Do().Get() + } else if api == AuthenticationGroupVersion.Group { + _, err = in.istioAuthenticationApi.Delete().Namespace(namespace).Resource(resourceType).Name(name).Do().Get() + } else { + _, err = in.istioRbacApi.Delete().Namespace(namespace).Resource(resourceType).Name(name).Do().Get() + } + return err +} + +// UpdateIstioObject updates an Istio object from either config api or networking api +func (in *IstioClient) UpdateIstioObject(api, namespace, resourceType, name, jsonPatch string) (IstioObject, error) { + log.Debugf("UpdateIstioObject input: %s / %s / %s / %s", api, namespace, resourceType, name) + var result runtime.Object + var err error + bytePatch := []byte(jsonPatch) + if api == ConfigGroupVersion.Group { + result, err = in.istioConfigApi.Patch(types.MergePatchType).Namespace(namespace).Resource(resourceType).SubResource(name).Body(bytePatch).Do().Get() + } else if api == NetworkingGroupVersion.Group { + result, err = in.istioNetworkingApi.Patch(types.MergePatchType).Namespace(namespace).Resource(resourceType).SubResource(name).Body(bytePatch).Do().Get() + } else if api == AuthenticationGroupVersion.Group { + result, err = in.istioAuthenticationApi.Patch(types.MergePatchType).Namespace(namespace).Resource(resourceType).SubResource(name).Body(bytePatch).Do().Get() + } else { + result, err = in.istioRbacApi.Patch(types.MergePatchType).Namespace(namespace).Resource(resourceType).SubResource(name).Body(bytePatch).Do().Get() + } + if err != nil { + return nil, err + } + istioObject, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s/%s doesn't return an IstioObject object", namespace, name) + } + return istioObject, err +} + +// GetVirtualServices return all VirtualServices for a given namespace. +// If serviceName param is provided it will filter all VirtualServices having a host defined on a particular service. +// It returns an error on any problem. +func (in *IstioClient) GetVirtualServices(namespace string, serviceName string) ([]IstioObject, error) { + result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(virtualServices).Do().Get() + if err != nil { + return nil, err + } + virtualServiceList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("%s/%s doesn't return a VirtualService list", namespace, serviceName) + } + + virtualServices := make([]IstioObject, 0) + for _, virtualService := range virtualServiceList.GetItems() { + appendVirtualService := serviceName == "" + routeProtocols := []string{"http", "tcp"} + if !appendVirtualService && FilterByRoute(virtualService.GetSpec(), routeProtocols, serviceName, namespace, nil) { + appendVirtualService = true + } + if appendVirtualService { + virtualServices = append(virtualServices, virtualService.DeepCopyIstioObject()) + } + } + return virtualServices, nil +} + +func (in *IstioClient) GetVirtualService(namespace string, virtualservice string) (IstioObject, error) { + result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(virtualServices).SubResource(virtualservice).Do().Get() + if err != nil { + return nil, err + } + + virtualService, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s/%s doesn't return a VirtualService object", namespace, virtualservice) + } + return virtualService.DeepCopyIstioObject(), nil +} + +// GetGateways return all Gateways for a given namespace. +// It returns an error on any problem. 
+func (in *IstioClient) GetGateways(namespace string) ([]IstioObject, error) { + result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(gateways).Do().Get() + if err != nil { + return nil, err + } + gatewayList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("%s doesn't return a Gateway list", namespace) + } + + gateways := make([]IstioObject, 0) + for _, gateway := range gatewayList.GetItems() { + gateways = append(gateways, gateway.DeepCopyIstioObject()) + } + return gateways, nil +} + +func (in *IstioClient) GetGateway(namespace string, gateway string) (IstioObject, error) { + result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(gateways).SubResource(gateway).Do().Get() + if err != nil { + return nil, err + } + + gatewayObject, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s/%s doesn't return a Gateway object", namespace, gateway) + } + return gatewayObject.DeepCopyIstioObject(), nil +} + +// GetServiceEntries return all ServiceEntry objects for a given namespace. +// It returns an error on any problem. +func (in *IstioClient) GetServiceEntries(namespace string) ([]IstioObject, error) { + result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(serviceentries).Do().Get() + if err != nil { + return nil, err + } + serviceEntriesList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("%s doesn't return a ServiceEntry list", namespace) + } + + serviceEntries := make([]IstioObject, 0) + for _, serviceEntry := range serviceEntriesList.GetItems() { + serviceEntries = append(serviceEntries, serviceEntry.DeepCopyIstioObject()) + } + return serviceEntries, nil +} + +func (in *IstioClient) GetServiceEntry(namespace string, serviceEntryName string) (IstioObject, error) { + result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(serviceentries).SubResource(serviceEntryName).Do().Get() + if err != nil { + return nil, err + } + + serviceEntry, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s/%v doesn't return a ServiceEntry object", namespace, serviceEntry) + } + return serviceEntry.DeepCopyIstioObject(), nil +} + +// GetDestinationRules returns all DestinationRules for a given namespace. +// If serviceName param is provided it will filter all DestinationRules having a host defined on a particular service. +// It returns an error on any problem. 
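+// Host matching follows FilterByHost: the short service name, service.namespace, service.namespace.svc and the full FQDN forms are all accepted.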
+func (in *IstioClient) GetDestinationRules(namespace string, serviceName string) ([]IstioObject, error) { + result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(destinationRules).Do().Get() + if err != nil { + return nil, err + } + destinationRuleList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("%s/%s doesn't return a DestinationRule list", namespace, serviceName) + } + + destinationRules := make([]IstioObject, 0) + for _, destinationRule := range destinationRuleList.Items { + appendDestinationRule := serviceName == "" + if host, ok := destinationRule.Spec["host"]; ok { + if dHost, ok := host.(string); ok && FilterByHost(dHost, serviceName, namespace) { + appendDestinationRule = true + } + } + if appendDestinationRule { + destinationRules = append(destinationRules, destinationRule.DeepCopyIstioObject()) + } + } + return destinationRules, nil +} + +func (in *IstioClient) GetDestinationRule(namespace string, destinationrule string) (IstioObject, error) { + result, err := in.istioNetworkingApi.Get().Namespace(namespace).Resource(destinationRules).SubResource(destinationrule).Do().Get() + if err != nil { + return nil, err + } + destinationRule, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s/%s doesn't return a DestinationRule object", namespace, destinationrule) + } + return destinationRule.DeepCopyIstioObject(), nil +} + +// GetQuotaSpecs returns all QuotaSpecs objects for a given namespace. +// It returns an error on any problem. +func (in *IstioClient) GetQuotaSpecs(namespace string) ([]IstioObject, error) { + result, err := in.istioConfigApi.Get().Namespace(namespace).Resource(quotaspecs).Do().Get() + if err != nil { + return nil, err + } + quotaSpecList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("%s doesn't return a QuotaSpecList list", namespace) + } + + quotaSpecs := make([]IstioObject, 0) + for _, qs := range quotaSpecList.GetItems() { + quotaSpecs = append(quotaSpecs, qs.DeepCopyIstioObject()) + } + return quotaSpecs, nil +} + +func (in *IstioClient) GetQuotaSpec(namespace string, quotaSpecName string) (IstioObject, error) { + result, err := in.istioConfigApi.Get().Namespace(namespace).Resource(quotaspecs).SubResource(quotaSpecName).Do().Get() + if err != nil { + return nil, err + } + + quotaSpec, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s/%s doesn't return a QuotaSpec object", namespace, quotaSpecName) + } + return quotaSpec.DeepCopyIstioObject(), nil +} + +// GetQuotaSpecBindings returns all QuotaSpecBindings objects for a given namespace. +// It returns an error on any problem. 
+func (in *IstioClient) GetQuotaSpecBindings(namespace string) ([]IstioObject, error) { + result, err := in.istioConfigApi.Get().Namespace(namespace).Resource(quotaspecbindings).Do().Get() + if err != nil { + return nil, err + } + quotaSpecBindingList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("%s doesn't return a QuotaSpecBindingList list", namespace) + } + + quotaSpecBindings := make([]IstioObject, 0) + for _, qs := range quotaSpecBindingList.GetItems() { + quotaSpecBindings = append(quotaSpecBindings, qs.DeepCopyIstioObject()) + } + return quotaSpecBindings, nil +} + +func (in *IstioClient) GetQuotaSpecBinding(namespace string, quotaSpecBindingName string) (IstioObject, error) { + result, err := in.istioConfigApi.Get().Namespace(namespace).Resource(quotaspecbindings).SubResource(quotaSpecBindingName).Do().Get() + if err != nil { + return nil, err + } + + quotaSpecBinding, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s/%s doesn't return a QuotaSpecBinding object", namespace, quotaSpecBindingName) + } + return quotaSpecBinding.DeepCopyIstioObject(), nil +} + +func (in *IstioClient) GetPolicies(namespace string) ([]IstioObject, error) { + result, err := in.istioAuthenticationApi.Get().Namespace(namespace).Resource(policies).Do().Get() + if err != nil { + return nil, err + } + + policyList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("%s doesn't return a PolicyList list", namespace) + } + + policies := make([]IstioObject, 0) + for _, ps := range policyList.GetItems() { + policies = append(policies, ps.DeepCopyIstioObject()) + } + + return policies, nil +} + +func (in *IstioClient) GetPolicy(namespace string, policyName string) (IstioObject, error) { + result, err := in.istioAuthenticationApi.Get().Namespace(namespace).Resource(policies).SubResource(policyName).Do().Get() + if err != nil { + return nil, err + } + + policy, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s doesn't return a Policy object", namespace) + } + + return policy.DeepCopyIstioObject(), nil +} + +func (in *IstioClient) GetMeshPolicies(namespace string) ([]IstioObject, error) { + // MeshPolicies are not namespaced. However, API returns all the instances even asking for one specific namespace. + // Due to soft-multitenancy, the call performed is namespaced to avoid triggering an error for cluster-wide access. 
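+	// The namespace therefore only scopes the request path; the returned list is the same whichever namespace is used.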
+ result, err := in.istioAuthenticationApi.Get().Namespace(namespace).Resource(meshPolicies).Do().Get() + if err != nil { + return nil, err + } + + policyList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("it doesn't return a PolicyList list") + } + + policies := make([]IstioObject, 0) + for _, ps := range policyList.GetItems() { + policies = append(policies, ps.DeepCopyIstioObject()) + } + + return policies, nil +} + +func (in *IstioClient) GetMeshPolicy(namespace string, policyName string) (IstioObject, error) { + result, err := in.istioAuthenticationApi.Get().Namespace(namespace).Resource(meshPolicies).SubResource(policyName).Do().Get() + if err != nil { + return nil, err + } + + mp, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s doesn't return a MeshPolicy object", namespace) + } + + return mp.DeepCopyIstioObject(), nil +} + +func (in *IstioClient) GetClusterRbacConfigs(namespace string) ([]IstioObject, error) { + result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(clusterrbacconfigs).Do().Get() + if err != nil { + return nil, err + } + + clusterRbacConfigList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("%s doesn't return a RbacConfigList list", namespace) + } + + clusterRbacConfigs := make([]IstioObject, 0) + for _, crc := range clusterRbacConfigList.GetItems() { + clusterRbacConfigs = append(clusterRbacConfigs, crc.DeepCopyIstioObject()) + } + + return clusterRbacConfigs, nil +} + +func (in *IstioClient) GetClusterRbacConfig(namespace string, name string) (IstioObject, error) { + result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(clusterrbacconfigs).SubResource(name).Do().Get() + if err != nil { + return nil, err + } + + clusterRbacConfig, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s doesn't return a ClusterRbacConfig object", namespace) + } + + return clusterRbacConfig.DeepCopyIstioObject(), nil +} + +func (in *IstioClient) GetServiceRoles(namespace string) ([]IstioObject, error) { + result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(serviceroles).Do().Get() + if err != nil { + return nil, err + } + + serviceRoleList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("%s doesn't return a ServiceRoleList list", namespace) + } + + serviceRoles := make([]IstioObject, 0) + for _, sr := range serviceRoleList.GetItems() { + serviceRoles = append(serviceRoles, sr.DeepCopyIstioObject()) + } + + return serviceRoles, nil +} + +func (in *IstioClient) GetServiceRole(namespace string, name string) (IstioObject, error) { + result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(serviceroles).SubResource(name).Do().Get() + if err != nil { + return nil, err + } + + serviceRole, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s doesn't return a ServiceRole object", namespace) + } + + return serviceRole.DeepCopyIstioObject(), nil +} + +func (in *IstioClient) GetServiceRoleBindings(namespace string) ([]IstioObject, error) { + result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(servicerolebindings).Do().Get() + if err != nil { + return nil, err + } + + serviceRoleBindingList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("%s doesn't return a ServiceRoleBindingList list", namespace) + } + + serviceRoleBindings := make([]IstioObject, 0) + for _, sr := range serviceRoleBindingList.GetItems() { + serviceRoleBindings = 
append(serviceRoleBindings, sr.DeepCopyIstioObject()) + } + + return serviceRoleBindings, nil +} + +func (in *IstioClient) GetServiceRoleBinding(namespace string, name string) (IstioObject, error) { + result, err := in.istioRbacApi.Get().Namespace(namespace).Resource(servicerolebindings).SubResource(name).Do().Get() + if err != nil { + return nil, err + } + + serviceRoleBinding, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s doesn't return a ServiceRoleBinding object", namespace) + } + + return serviceRoleBinding.DeepCopyIstioObject(), nil +} + +func FilterByHost(host, serviceName, namespace string) bool { + // Check single name + if host == serviceName { + return true + } + // Check service.namespace + if host == fmt.Sprintf("%s.%s", serviceName, namespace) { + return true + } + // Check the FQDN. ..svc + if host == fmt.Sprintf("%s.%s.%s", serviceName, namespace, "svc") { + return true + } + + // Check the FQDN. ..svc. + if host == fmt.Sprintf("%s.%s.%s", serviceName, namespace, config.Get().ExternalServices.Istio.IstioIdentityDomain) { + return true + } + + // Note, FQDN names are defined from Kubernetes registry specification [1] + // [1] https://github.com/kubernetes/dns/blob/master/docs/specification.md + + return false +} + +func FilterByRoute(spec map[string]interface{}, protocols []string, service string, namespace string, serviceEntries map[string]struct{}) bool { + if len(protocols) == 0 { + return false + } + for _, protocol := range protocols { + if prot, ok := spec[protocol]; ok { + if aHttp, ok := prot.([]interface{}); ok { + for _, httpRoute := range aHttp { + if mHttpRoute, ok := httpRoute.(map[string]interface{}); ok { + if route, ok := mHttpRoute["route"]; ok { + if aDestinationWeight, ok := route.([]interface{}); ok { + for _, destination := range aDestinationWeight { + if mDestination, ok := destination.(map[string]interface{}); ok { + if destinationW, ok := mDestination["destination"]; ok { + if mDestinationW, ok := destinationW.(map[string]interface{}); ok { + if host, ok := mDestinationW["host"]; ok { + if sHost, ok := host.(string); ok { + if FilterByHost(sHost, service, namespace) { + return true + } + if serviceEntries != nil { + // We have ServiceEntry to check + if _, found := serviceEntries[strings.ToLower(protocol)+sHost]; found { + return true + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + return false +} + +// ServiceEntryHostnames returns a list of hostnames defined in the ServiceEntries Specs. 
Keys in the resulting map are the hostnames; values are the protocols of their ports, mapped to VirtualService protocol names (in lowercase).
+// exported for test
+func ServiceEntryHostnames(serviceEntries []IstioObject) map[string][]string {
+	hostnames := make(map[string][]string)
+
+	for _, v := range serviceEntries {
+		if hostsSpec, found := v.GetSpec()["hosts"]; found {
+			if hosts, ok := hostsSpec.([]interface{}); ok {
+				// Seek the protocol
+				for _, h := range hosts {
+					if hostname, ok := h.(string); ok {
+						hostnames[hostname] = make([]string, 0, 1)
+					}
+				}
+			}
+		}
+		if portsSpec, found := v.GetSpec()["ports"]; found {
+			if portsArray, ok := portsSpec.([]interface{}); ok {
+				for _, portDef := range portsArray {
+					if ports, ok := portDef.(map[string]interface{}); ok {
+						if proto, found := ports["protocol"]; found {
+							if protocol, ok := proto.(string); ok {
+								protocol = mapPortToVirtualServiceProtocol(protocol)
+								for host := range hostnames {
+									hostnames[host] = append(hostnames[host], protocol)
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return hostnames
+}
+
+// mapPortToVirtualServiceProtocol transforms Istio's Port-definitions' protocol names to VirtualService's protocol names
+func mapPortToVirtualServiceProtocol(proto string) string {
+	// http: HTTP/HTTP2/GRPC/ TLS-terminated-HTTPS and service entry ports using HTTP/HTTP2/GRPC protocol
+	// tls: HTTPS/TLS protocols (i.e. with “passthrough” TLS mode) and service entry ports using HTTPS/TLS protocols.
+	// tcp: everything else
+
+	switch proto {
+	case "HTTP":
+		fallthrough
+	case "HTTP2":
+		fallthrough
+	case "GRPC":
+		return "http"
+	case "HTTPS":
+		fallthrough
+	case "TLS":
+		return "tls"
+	default:
+		return "tcp"
+	}
+}
+
+// ValidatePort parses the Istio Port definition and validates the naming scheme
+func ValidatePort(portDef interface{}) bool {
+	return matchPortNameRule(parsePort(portDef))
+}
+
+func parsePort(portDef interface{}) (string, string) {
+	var name, proto string
+	if port, ok := portDef.(map[string]interface{}); ok {
+		if portNameDef, found := port["name"]; found {
+			if portName, ok := portNameDef.(string); ok {
+				name = portName
+			}
+		}
+		if protocolDef, found := port["protocol"]; found {
+			if protocol, ok := protocolDef.(string); ok {
+				proto = protocol
+			}
+		}
+	}
+
+	return name, proto
+}
+
+func matchPortNameRule(portName, protocol string) bool {
+	protocol = strings.ToLower(protocol)
+	// Check that portName begins with the protocol
+
+	if protocol == "tcp" || protocol == "udp" {
+		// TCP and UDP protocols do not care about the name
+		return true
+	}
+
+	if !strings.HasPrefix(portName, protocol) {
+		return false
+	}
+
+	// If longer than the protocol, then it must adhere to [-suffix]
+	// and if there's a '-', then there must be a suffix.
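+	// e.g. for protocol "http": "http" and "http-web" are accepted, while "httpweb" is not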
+	if len(portName) > len(protocol) {
+		restPortName := portName[len(protocol):]
+		return portNameMatcher.MatchString(restPortName)
+	}
+
+	// Case portName == protocolName
+	return true
+}
+
+// GatewayNames extracts the gateway names for easier matching
+func GatewayNames(gateways [][]IstioObject) map[string]struct{} {
+	var empty struct{}
+	names := make(map[string]struct{})
+	for _, ns := range gateways {
+		for _, gw := range ns {
+			gw := gw
+			clusterName := gw.GetObjectMeta().ClusterName
+			if clusterName == "" {
+				clusterName = config.Get().ExternalServices.Istio.IstioIdentityDomain
+			}
+			names[ParseHost(gw.GetObjectMeta().Name, gw.GetObjectMeta().Namespace, clusterName).String()] = empty
+		}
+	}
+	return names
+}
+
+// ValidateVirtualServiceGateways checks all VirtualService gateways (except mesh, which is a reserved word) and verifies that they are found in the given list of gatewayNames. It also returns the index of the missing gateway, to give a clearer error path in the editor.
+func ValidateVirtualServiceGateways(spec map[string]interface{}, gatewayNames map[string]struct{}, namespace, clusterName string) (bool, int) {
+	if gatewaysSpec, found := spec["gateways"]; found {
+		if gateways, ok := gatewaysSpec.([]interface{}); ok {
+			for index, g := range gateways {
+				if gate, ok := g.(string); ok {
+					if gate == "mesh" {
+						return true, -1
+					}
+					hostname := ParseHost(gate, namespace, clusterName).String()
+					for gw := range gatewayNames {
+						if found := FilterByHost(hostname, gw, namespace); found {
+							return true, -1
+						}
+					}
+					return false, index
+				}
+			}
+		}
+	}
+	// No gateways defined or all of them were found. Returning -1 indicates there is no missing gateway.
+	return true, -1
+}
+
+func fetch(rValue *[]IstioObject, namespace string, service string, fetcher func(string, string) ([]IstioObject, error), wg *sync.WaitGroup, errChan chan error) {
+	defer wg.Done()
+	fetched, err := fetcher(namespace, service)
+	*rValue = append(*rValue, fetched...)
+	if err != nil {
+		errChan <- err
+	}
+}
+
+// Identical to the above, but since the k8s layer has both (namespace, serviceentry) and (namespace) queries, we need two different functions
+func fetchNoEntry(rValue *[]IstioObject, namespace string, fetcher func(string) ([]IstioObject, error), wg *sync.WaitGroup, errChan chan error) {
+	defer wg.Done()
+	fetched, err := fetcher(namespace)
+	*rValue = append(*rValue, fetched...)
+	if err != nil && len(errChan) == 0 {
+		errChan <- err
+	}
+}
diff --git a/vendor/github.com/kiali/kiali/kubernetes/istio_rules_service.go b/vendor/github.com/kiali/kiali/kubernetes/istio_rules_service.go
new file mode 100644
index 000000000..1b83ae920
--- /dev/null
+++ b/vendor/github.com/kiali/kiali/kubernetes/istio_rules_service.go
@@ -0,0 +1,137 @@
+package kubernetes
+
+import (
+	"fmt"
+	"github.com/kiali/kiali/log"
+)
+
+// GetIstioRules returns a list of mixer rules for a given namespace.
+func (in *IstioClient) GetIstioRules(namespace string) ([]IstioObject, error) { + result, err := in.istioConfigApi.Get().Namespace(namespace).Resource(rules).Do().Get() + if err != nil { + return nil, err + } + ruleList, ok := result.(*GenericIstioObjectList) + if !ok { + return nil, fmt.Errorf("%s doesn't return a rules list", namespace) + } + + istioRules := make([]IstioObject, 0) + for _, rule := range ruleList.Items { + istioRules = append(istioRules, rule.DeepCopyIstioObject()) + } + return istioRules, nil +} + +func (in *IstioClient) GetAdapters(namespace string) ([]IstioObject, error) { + return in.getAdaptersTemplates(namespace, "adapter", adapterPlurals) +} + +func (in *IstioClient) GetTemplates(namespace string) ([]IstioObject, error) { + return in.getAdaptersTemplates(namespace, "template", templatePlurals) +} + +func (in *IstioClient) GetIstioRule(namespace string, istiorule string) (IstioObject, error) { + result, err := in.istioConfigApi.Get().Namespace(namespace).Resource(rules).SubResource(istiorule).Do().Get() + if err != nil { + return nil, err + } + mRule, ok := result.(*GenericIstioObject) + if !ok { + return nil, fmt.Errorf("%s/%s doesn't return a Rule", namespace, istiorule) + } + return mRule.DeepCopyIstioObject(), nil +} + +func (in *IstioClient) GetAdapter(namespace, adapterType, adapterName string) (IstioObject, error) { + return in.getAdapterTemplate(namespace, "adapter", adapterType, adapterName, adapterPlurals) +} + +func (in *IstioClient) GetTemplate(namespace, templateType, templateName string) (IstioObject, error) { + return in.getAdapterTemplate(namespace, "template", templateType, templateName, templatePlurals) +} + +func (in *IstioClient) getAdaptersTemplates(namespace string, itemType string, pluralsMap map[string]string) ([]IstioObject, error) { + resultsChan := make(chan istioResponse) + for name, plural := range pluralsMap { + go func(name, plural string) { + results, err := in.istioConfigApi.Get().Namespace(namespace).Resource(plural).Do().Get() + istioObjects := istioResponse{} + resultList, ok := results.(*GenericIstioObjectList) + if !ok { + err = fmt.Errorf("%s doesn't return a %s list", namespace, plural) + } + if err == nil { + istioObjects.results = make([]IstioObject, 0) + for _, result := range resultList.Items { + adapter := result.DeepCopyIstioObject() + // We need to specifically add the adapter name in the label + if adapter.GetObjectMeta().Labels == nil { + objectMeta := adapter.GetObjectMeta() + objectMeta.Labels = make(map[string]string) + adapter.SetObjectMeta(objectMeta) + } + adapter.GetObjectMeta().Labels[itemType] = name + // To support plural, we have only adapter/template -> adapters/templates + adapter.GetObjectMeta().Labels[itemType] = name + adapter.GetObjectMeta().Labels[itemType+"s"] = plural + istioObjects.results = append(istioObjects.results, adapter) + istioObjects.err = nil + } + } else { + istioObjects.results = nil + istioObjects.err = err + } + resultsChan <- istioObjects + }(name, plural) + } + + results := make([]IstioObject, 0) + for i := 0; i < len(pluralsMap); i++ { + adapterTemplate := <-resultsChan + if adapterTemplate.err == nil { + for _, istioObject := range adapterTemplate.results { + results = append(results, istioObject) + } + } else { + log.Warningf("Querying %s got an error: %s", itemType, adapterTemplate.err) + } + } + return results, nil +} + +func (in *IstioClient) getAdapterTemplate(namespace string, itemType string, itemSubtype, itemName string, pluralsMap map[string]string) (IstioObject, 
error) { + ok := false + subtype := "" + for key, plural := range pluralsMap { + if itemSubtype == plural { + subtype = key + ok = true + break + } + } + if !ok { + return nil, fmt.Errorf("%s is not supported", itemSubtype) + } + + result, err := in.istioConfigApi.Get().Namespace(namespace).Resource(itemSubtype).SubResource(itemName).Do().Get() + istioObject, ok := result.(IstioObject) + if !ok { + istioObject = nil + if err == nil { + err = fmt.Errorf("%s/%s doesn't return a valid IstioObject", itemSubtype, itemName) + } + } + if err != nil { + return nil, err + } + if istioObject.GetObjectMeta().Labels == nil { + objectMeta := istioObject.GetObjectMeta() + objectMeta.Labels = make(map[string]string) + istioObject.SetObjectMeta(objectMeta) + } + // Adding the singular name of the adapter/template to propagate it into the Kiali model + istioObject.GetObjectMeta().Labels[itemType] = subtype + istioObject.GetObjectMeta().Labels[itemType+"s"] = itemSubtype + return istioObject, nil +} diff --git a/vendor/github.com/kiali/kiali/kubernetes/kiali_monitoring_client.go b/vendor/github.com/kiali/kiali/kubernetes/kiali_monitoring_client.go new file mode 100644 index 000000000..b5360fa24 --- /dev/null +++ b/vendor/github.com/kiali/kiali/kubernetes/kiali_monitoring_client.go @@ -0,0 +1,55 @@ +package kubernetes + +import ( + "encoding/json" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" +) + +// KialiMonitoringInterface for mocks (only mocked function are necessary here) +type KialiMonitoringInterface interface { + GetDashboard(namespace string, name string) (*MonitoringDashboard, error) +} + +// KialiMonitoringClient is the client struct for Kiali Monitoring API over Kubernetes +// API to get MonitoringDashboards +type KialiMonitoringClient struct { + KialiMonitoringInterface + client *rest.RESTClient +} + +// NewKialiMonitoringClient creates a new client able to fetch Kiali Monitoring API. 
+func NewKialiMonitoringClient() (*KialiMonitoringClient, error) { + config, err := ConfigClient() + if err != nil { + return nil, err + } + + types := runtime.NewScheme() + schemeBuilder := runtime.NewSchemeBuilder( + func(scheme *runtime.Scheme) error { + return nil + }) + + err = schemeBuilder.AddToScheme(types) + if err != nil { + return nil, err + } + + client, err := newClientForAPI(config, kialiMonitoringGroupVersion, types) + return &KialiMonitoringClient{ + client: client, + }, err +} + +// GetDashboard returns a MonitoringDashboard for the given name +func (in *KialiMonitoringClient) GetDashboard(namespace string, name string) (*MonitoringDashboard, error) { + result, err := in.client.Get().Namespace(namespace).Resource("monitoringdashboards").SubResource(name).Do().Raw() + if err != nil { + return nil, err + } + var dashboard MonitoringDashboard + err = json.Unmarshal(result, &dashboard) + return &dashboard, err +} diff --git a/vendor/github.com/kiali/kiali/kubernetes/kiali_monitoring_types.go b/vendor/github.com/kiali/kiali/kubernetes/kiali_monitoring_types.go new file mode 100644 index 000000000..5d724aacc --- /dev/null +++ b/vendor/github.com/kiali/kiali/kubernetes/kiali_monitoring_types.go @@ -0,0 +1,44 @@ +package kubernetes + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + // Raw constant for DataType + Raw = "raw" + // Rate constant for DataType + Rate = "rate" + // Histogram constant for DataType + Histogram = "histogram" +) + +var kialiMonitoringGroupVersion = schema.GroupVersion{ + Group: "monitoring.kiali.io", + Version: "v1alpha1", +} + +type MonitoringDashboard struct { + Metadata map[string]interface{} + Spec MonitoringDashboardSpec +} + +type MonitoringDashboardSpec struct { + Title string + Charts []MonitoringDashboardChart +} + +type MonitoringDashboardChart struct { + Name string + Unit string + Spans int + MetricName string + DataType string // MetricType is either "raw", "rate" or "histogram" + Aggregator string // Aggregator can be set for raw data. Ex: "sum", "avg". See https://prometheus.io/docs/prometheus/latest/querying/operators/#aggregation-operators + Aggregations []MonitoringDashboardAggregation +} + +type MonitoringDashboardAggregation struct { + Label string + DisplayName string +} diff --git a/vendor/github.com/kiali/kiali/kubernetes/kubernetes_service.go b/vendor/github.com/kiali/kiali/kubernetes/kubernetes_service.go new file mode 100644 index 000000000..6bc5b3d30 --- /dev/null +++ b/vendor/github.com/kiali/kiali/kubernetes/kubernetes_service.go @@ -0,0 +1,305 @@ +package kubernetes + +import ( + "k8s.io/api/apps/v1beta1" + "k8s.io/api/apps/v1beta2" + auth_v1 "k8s.io/api/authorization/v1" + batch_v1 "k8s.io/api/batch/v1" + batch_v1beta1 "k8s.io/api/batch/v1beta1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + + osappsv1 "github.com/openshift/api/apps/v1" + osv1 "github.com/openshift/api/project/v1" +) + +// GetNamespace fetches and returns the specified namespace definition +// from the cluster +func (in *IstioClient) GetNamespace(namespace string) (*v1.Namespace, error) { + ns, err := in.k8s.CoreV1().Namespaces().Get(namespace, emptyGetOptions) + if err != nil { + return &v1.Namespace{}, err + } + + return ns, nil +} + +// GetNamespaces returns a list of all namespaces of the cluster. +// It returns a list of all namespaces of the cluster. +// It returns an error on any problem. 
+func (in *IstioClient) GetNamespaces() ([]v1.Namespace, error) { + namespaces, err := in.k8s.CoreV1().Namespaces().List(emptyListOptions) + if err != nil { + return nil, err + } + + return namespaces.Items, nil +} + +// GetProject fetches and returns the definition of the project with +// the specified name by querying the cluster API. GetProject will fail +// if the underlying cluster is not Openshift. +func (in *IstioClient) GetProject(name string) (*osv1.Project, error) { + result := &osv1.Project{} + + err := in.k8s.RESTClient().Get().Prefix("apis", "project.openshift.io", "v1", "projects", name).Do().Into(result) + + if err != nil { + return nil, err + } + + return result, nil +} + +func (in *IstioClient) GetProjects() ([]osv1.Project, error) { + result := &osv1.ProjectList{} + + err := in.k8s.RESTClient().Get().Prefix("apis", "project.openshift.io", "v1", "projects").Do().Into(result) + + if err != nil { + return nil, err + } + + return result.Items, nil +} + +func (in *IstioClient) IsOpenShift() bool { + if in.isOpenShift == nil { + isOpenShift := false + _, err := in.k8s.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do().Raw() + if err == nil { + isOpenShift = true + } + in.isOpenShift = &isOpenShift + } + return *in.isOpenShift +} + +// GetServices returns a list of services for a given namespace. +// If selectorLabels is defined the list of services is filtered for those that matches Services selector labels. +// It returns an error on any problem. +func (in *IstioClient) GetServices(namespace string, selectorLabels map[string]string) ([]v1.Service, error) { + var allServices []v1.Service + var err error + if in.k8sCache != nil { + allServices, err = in.k8sCache.GetServices(namespace) + } else { + if allServicesList, err := in.k8s.CoreV1().Services(namespace).List(emptyListOptions); err == nil { + allServices = allServicesList.Items + } + } + if err != nil { + return []v1.Service{}, err + } + if selectorLabels == nil { + return allServices, nil + } + var services []v1.Service + for _, svc := range allServices { + svcSelector := labels.Set(svc.Spec.Selector).AsSelector() + if svcSelector.Matches(labels.Set(selectorLabels)) { + services = append(services, svc) + } + } + return services, nil +} + +// GetDeployment returns the definition of a specific deployment. +// It returns an error on any problem. +func (in *IstioClient) GetDeployment(namespace, deploymentName string) (*v1beta1.Deployment, error) { + if in.k8sCache != nil { + return in.k8sCache.GetDeployment(namespace, deploymentName) + } + return in.k8s.AppsV1beta1().Deployments(namespace).Get(deploymentName, emptyGetOptions) +} + +// GetDeployments returns an array of deployments for a given namespace and a set of labels. +// It returns an error on any problem. +func (in *IstioClient) GetDeployments(namespace string) ([]v1beta1.Deployment, error) { + if in.k8sCache != nil { + return in.k8sCache.GetDeployments(namespace) + } + if depList, err := in.k8s.AppsV1beta1().Deployments(namespace).List(emptyListOptions); err == nil { + return depList.Items, nil + } else { + return []v1beta1.Deployment{}, err + } +} + +// GetDeployment returns the definition of a specific deployment. +// It returns an error on any problem. 
+func (in *IstioClient) GetDeploymentConfig(namespace, deploymentconfigName string) (*osappsv1.DeploymentConfig, error) { + result := &osappsv1.DeploymentConfig{} + err := in.k8s.RESTClient().Get().Prefix("apis", "apps.openshift.io", "v1").Namespace(namespace).Resource("deploymentconfigs").SubResource(deploymentconfigName).Do().Into(result) + if err != nil { + return nil, err + } + return result, nil +} + +// GetDeployments returns an array of deployments for a given namespace and a set of labels. +// An empty labelSelector will fetch all Deployments for a namespace. +// It returns an error on any problem. +func (in *IstioClient) GetDeploymentConfigs(namespace string) ([]osappsv1.DeploymentConfig, error) { + result := &osappsv1.DeploymentConfigList{} + err := in.k8s.RESTClient().Get().Prefix("apis", "apps.openshift.io", "v1").Namespace(namespace).Resource("deploymentconfigs").Do().Into(result) + if err != nil { + return nil, err + } + return result.Items, nil +} + +func (in *IstioClient) GetReplicaSets(namespace string) ([]v1beta2.ReplicaSet, error) { + if in.k8sCache != nil { + return in.k8sCache.GetReplicaSets(namespace) + } + if rsList, err := in.k8s.AppsV1beta2().ReplicaSets(namespace).List(emptyListOptions); err == nil { + return rsList.Items, nil + } else { + return []v1beta2.ReplicaSet{}, err + } +} + +func (in *IstioClient) GetStatefulSet(namespace string, statefulsetName string) (*v1beta2.StatefulSet, error) { + if in.k8sCache != nil { + return in.k8sCache.GetStatefulSet(namespace, statefulsetName) + } + return in.k8s.AppsV1beta2().StatefulSets(namespace).Get(statefulsetName, emptyGetOptions) +} + +func (in *IstioClient) GetStatefulSets(namespace string) ([]v1beta2.StatefulSet, error) { + if in.k8sCache != nil { + return in.k8sCache.GetStatefulSets(namespace) + } + if ssList, err := in.k8s.AppsV1beta2().StatefulSets(namespace).List(emptyListOptions); err == nil { + return ssList.Items, nil + } else { + return []v1beta2.StatefulSet{}, err + } +} + +func (in *IstioClient) GetReplicationControllers(namespace string) ([]v1.ReplicationController, error) { + if in.k8sCache != nil { + return in.k8sCache.GetReplicationControllers(namespace) + } + if rcList, err := in.k8s.CoreV1().ReplicationControllers(namespace).List(emptyListOptions); err == nil { + return rcList.Items, nil + } else { + return []v1.ReplicationController{}, err + } +} + +// GetService returns the definition of a specific service. +// It returns an error on any problem. +func (in *IstioClient) GetService(namespace, serviceName string) (*v1.Service, error) { + if in.k8sCache != nil { + return in.k8sCache.GetService(namespace, serviceName) + } + return in.k8s.CoreV1().Services(namespace).Get(serviceName, emptyGetOptions) +} + +// GetEndpoints return the list of endpoint of a specific service. +// It returns an error on any problem. +func (in *IstioClient) GetEndpoints(namespace, serviceName string) (*v1.Endpoints, error) { + if in.k8sCache != nil { + return in.k8sCache.GetEndpoints(namespace, serviceName) + } + return in.k8s.CoreV1().Endpoints(namespace).Get(serviceName, emptyGetOptions) +} + +// GetPods returns the pods definitions for a given set of labels. +// An empty labelSelector will fetch all pods found per a namespace. +// It returns an error on any problem. 
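+//
+// Illustrative sketch (labels.Parse comes from k8s.io/apimachinery/pkg/labels and
+// FilterPodsForSelector from filters.go in this package); this is how the cached
+// branch below applies the selector to an already fetched pod list:
+//
+//	selector, err := labels.Parse("app=reviews,version=v1")
+//	if err != nil {
+//		return []v1.Pod{}, err
+//	}
+//	pods = FilterPodsForSelector(selector, pods)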
+func (in *IstioClient) GetPods(namespace, labelSelector string) ([]v1.Pod, error) { + if in.k8sCache != nil { + pods, err := in.k8sCache.GetPods(namespace) + if err != nil { + return []v1.Pod{}, err + } + if labelSelector != "" { + selector, err := labels.Parse(labelSelector) + if err != nil { + return []v1.Pod{}, err + } + pods = FilterPodsForSelector(selector, pods) + } + return pods, nil + } + // An empty selector is ambiguous in the go client, could mean either "select all" or "select none" + // Here we assume empty == select all + // (see also https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) + if pods, err := in.k8s.CoreV1().Pods(namespace).List(meta_v1.ListOptions{LabelSelector: labelSelector}); err == nil { + return pods.Items, nil + } else { + return []v1.Pod{}, err + } +} + +func (in *IstioClient) GetCronJobs(namespace string) ([]batch_v1beta1.CronJob, error) { + if in.k8sCache != nil { + return in.k8sCache.GetCronJobs(namespace) + } + if cjList, err := in.k8s.BatchV1beta1().CronJobs(namespace).List(emptyListOptions); err == nil { + return cjList.Items, nil + } else { + return []batch_v1beta1.CronJob{}, err + } +} + +func (in *IstioClient) GetJobs(namespace string) ([]batch_v1.Job, error) { + if in.k8sCache != nil { + return in.k8sCache.GetJobs(namespace) + } + if jList, err := in.k8s.BatchV1().Jobs(namespace).List(emptyListOptions); err == nil { + return jList.Items, nil + } else { + return []batch_v1.Job{}, err + } +} + +// NewNotFound is a helper method to create a NotFound error similar as used by the kubernetes client. +// This method helps upper layers to send a explicit NotFound error without querying the backend. +func NewNotFound(name, group, resource string) error { + return errors.NewNotFound(schema.GroupResource{Group: group, Resource: resource}, name) +} + +// GetSelfSubjectAccessReview provides information on Kiali permissions +func (in *IstioClient) GetSelfSubjectAccessReview(namespace, api, resourceType string, verbs []string) ([]*auth_v1.SelfSubjectAccessReview, error) { + calls := len(verbs) + ch := make(chan *auth_v1.SelfSubjectAccessReview, calls) + errChan := make(chan error) + for _, v := range verbs { + go func(verb string) { + res, err := in.k8s.AuthorizationV1().SelfSubjectAccessReviews().Create(&auth_v1.SelfSubjectAccessReview{ + Spec: auth_v1.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &auth_v1.ResourceAttributes{ + Namespace: namespace, + Verb: verb, + Group: api, + Resource: resourceType, + }, + }, + }) + if err != nil { + errChan <- err + } else { + ch <- res + } + }(v) + } + + var err error + result := []*auth_v1.SelfSubjectAccessReview{} + for count := 0; count < calls; count++ { + select { + case res := <-ch: + result = append(result, res) + case err = <-errChan: + // No op + } + } + return result, err +} diff --git a/vendor/github.com/kiali/kiali/kubernetes/kubetest/mock.go b/vendor/github.com/kiali/kiali/kubernetes/kubetest/mock.go new file mode 100644 index 000000000..ef392ed7a --- /dev/null +++ b/vendor/github.com/kiali/kiali/kubernetes/kubetest/mock.go @@ -0,0 +1,442 @@ +package kubetest + +import ( + "fmt" + + osappsv1 "github.com/openshift/api/apps/v1" + osv1 "github.com/openshift/api/project/v1" + "github.com/stretchr/testify/mock" + "k8s.io/api/apps/v1beta1" + "k8s.io/api/apps/v1beta2" + auth_v1 "k8s.io/api/authorization/v1" + batch_v1 "k8s.io/api/batch/v1" + batch_v1beta1 "k8s.io/api/batch/v1beta1" + v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + 
"github.com/kiali/kiali/kubernetes" +) + +type K8SClientMock struct { + mock.Mock +} + +// Constructor + +func NewK8SClientMock() *K8SClientMock { + k8s := new(K8SClientMock) + k8s.On("IsOpenShift").Return(true) + return k8s +} + +// Business methods + +// MockEmptyWorkloads setup the current mock to return empty workloads for every type of workloads (deployment, dc, rs, jobs, etc.) +func (o *K8SClientMock) MockEmptyWorkloads(namespace interface{}) { + o.On("GetDeployments", namespace).Return([]v1beta1.Deployment{}, nil) + o.On("GetReplicaSets", namespace).Return([]v1beta2.ReplicaSet{}, nil) + o.On("GetReplicationControllers", namespace).Return([]v1.ReplicationController{}, nil) + o.On("GetDeploymentConfigs", namespace).Return([]osappsv1.DeploymentConfig{}, nil) + o.On("GetStatefulSets", namespace).Return([]v1beta2.StatefulSet{}, nil) + o.On("GetJobs", namespace).Return([]batch_v1.Job{}, nil) + o.On("GetCronJobs", namespace).Return([]batch_v1beta1.CronJob{}, nil) +} + +// MockEmptyWorkload setup the current mock to return an empty workload for every type of workloads (deployment, dc, rs, jobs, etc.) +func (o *K8SClientMock) MockEmptyWorkload(namespace interface{}, workload interface{}) { + notfound := fmt.Errorf("not found") + o.On("GetDeployment", namespace, workload).Return(&v1beta1.Deployment{}, notfound) + o.On("GetStatefulSet", namespace, workload).Return(&v1beta2.StatefulSet{}, notfound) + o.On("GetDeploymentConfig", namespace, workload).Return(&osappsv1.DeploymentConfig{}, notfound) + o.On("GetReplicaSets", namespace).Return([]v1beta2.ReplicaSet{}, nil) + o.On("GetReplicationControllers", namespace).Return([]v1.ReplicationController{}, nil) + o.On("GetJobs", namespace).Return([]batch_v1.Job{}, nil) + o.On("GetCronJobs", namespace).Return([]batch_v1beta1.CronJob{}, nil) +} + +func (o *K8SClientMock) CreateIstioObject(api, namespace, resourceType, json string) (kubernetes.IstioObject, error) { + args := o.Called(api, namespace, resourceType, json) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) DeleteIstioObject(api, namespace, objectType, objectName string) error { + args := o.Called(api, namespace, objectType, objectName) + return args.Error(0) +} + +func (o *K8SClientMock) GetAdapter(namespace, adapterType, adapterName string) (kubernetes.IstioObject, error) { + args := o.Called(namespace, adapterType, adapterName) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetAdapters(namespace string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetCronJobs(namespace string) ([]batch_v1beta1.CronJob, error) { + args := o.Called(namespace) + return args.Get(0).([]batch_v1beta1.CronJob), args.Error(1) +} + +func (o *K8SClientMock) GetDeployment(namespace string, deploymentName string) (*v1beta1.Deployment, error) { + args := o.Called(namespace, deploymentName) + return args.Get(0).(*v1beta1.Deployment), args.Error(1) +} + +func (o *K8SClientMock) GetDeployments(namespace string) ([]v1beta1.Deployment, error) { + args := o.Called(namespace) + return args.Get(0).([]v1beta1.Deployment), args.Error(1) +} + +func (o *K8SClientMock) GetDeploymentConfig(namespace string, deploymentName string) (*osappsv1.DeploymentConfig, error) { + args := o.Called(namespace, deploymentName) + return args.Get(0).(*osappsv1.DeploymentConfig), args.Error(1) +} + +func (o *K8SClientMock) GetDeploymentConfigs(namespace 
string) ([]osappsv1.DeploymentConfig, error) { + args := o.Called(namespace) + return args.Get(0).([]osappsv1.DeploymentConfig), args.Error(1) +} + +func (o *K8SClientMock) GetDestinationRules(namespace string, serviceName string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace, serviceName) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetDestinationRule(namespace string, destinationrule string) (kubernetes.IstioObject, error) { + args := o.Called(namespace, destinationrule) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetEndpoints(namespace string, serviceName string) (*v1.Endpoints, error) { + args := o.Called(namespace, serviceName) + return args.Get(0).(*v1.Endpoints), args.Error(1) +} + +func (o *K8SClientMock) GetGateways(namespace string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetGateway(namespace string, gateway string) (kubernetes.IstioObject, error) { + args := o.Called(namespace, gateway) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetIstioDetails(namespace string, serviceName string) (*kubernetes.IstioDetails, error) { + args := o.Called(namespace, serviceName) + return args.Get(0).(*kubernetes.IstioDetails), args.Error(1) +} + +func (o *K8SClientMock) GetIstioRule(namespace string, istiorule string) (kubernetes.IstioObject, error) { + args := o.Called(namespace, istiorule) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetIstioRules(namespace string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetJobs(namespace string) ([]batch_v1.Job, error) { + args := o.Called(namespace) + return args.Get(0).([]batch_v1.Job), args.Error(1) +} + +func (o *K8SClientMock) GetNamespace(namespace string) (*v1.Namespace, error) { + args := o.Called(namespace) + return args.Get(0).(*v1.Namespace), args.Error(1) +} + +func (o *K8SClientMock) GetNamespaces() ([]v1.Namespace, error) { + args := o.Called() + return args.Get(0).([]v1.Namespace), args.Error(1) +} + +func (o *K8SClientMock) GetPods(namespace, labelSelector string) ([]v1.Pod, error) { + args := o.Called(namespace, labelSelector) + return args.Get(0).([]v1.Pod), args.Error(1) +} + +func (o *K8SClientMock) GetProject(project string) (*osv1.Project, error) { + args := o.Called(project) + return args.Get(0).(*osv1.Project), args.Error(1) +} + +func (o *K8SClientMock) GetProjects() ([]osv1.Project, error) { + args := o.Called() + return args.Get(0).([]osv1.Project), args.Error(1) +} + +func (o *K8SClientMock) GetQuotaSpec(namespace string, quotaSpecName string) (kubernetes.IstioObject, error) { + args := o.Called(namespace, quotaSpecName) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetQuotaSpecs(namespace string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetQuotaSpecBinding(namespace string, quotaSpecBindingName string) (kubernetes.IstioObject, error) { + args := o.Called(namespace, quotaSpecBindingName) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetQuotaSpecBindings(namespace string) 
([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetReplicationControllers(namespace string) ([]v1.ReplicationController, error) { + args := o.Called(namespace) + return args.Get(0).([]v1.ReplicationController), args.Error(1) +} + +func (o *K8SClientMock) GetReplicaSets(namespace string) ([]v1beta2.ReplicaSet, error) { + args := o.Called(namespace) + return args.Get(0).([]v1beta2.ReplicaSet), args.Error(1) +} + +func (o *K8SClientMock) GetSelfSubjectAccessReview(namespace, api, resourceType string, verbs []string) ([]*auth_v1.SelfSubjectAccessReview, error) { + args := o.Called(namespace, api, resourceType, verbs) + return args.Get(0).([]*auth_v1.SelfSubjectAccessReview), args.Error(1) +} + +func (o *K8SClientMock) GetService(namespace string, serviceName string) (*v1.Service, error) { + args := o.Called(namespace, serviceName) + return args.Get(0).(*v1.Service), args.Error(1) +} + +func (o *K8SClientMock) GetServices(namespace string, selectorLabels map[string]string) ([]v1.Service, error) { + args := o.Called(namespace, selectorLabels) + return args.Get(0).([]v1.Service), args.Error(1) +} + +func (o *K8SClientMock) GetServiceEntries(namespace string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetServiceEntry(namespace string, serviceEntryName string) (kubernetes.IstioObject, error) { + args := o.Called(namespace, serviceEntryName) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetStatefulSet(namespace string, statefulsetName string) (*v1beta2.StatefulSet, error) { + args := o.Called(namespace, statefulsetName) + return args.Get(0).(*v1beta2.StatefulSet), args.Error(1) +} + +func (o *K8SClientMock) GetStatefulSets(namespace string) ([]v1beta2.StatefulSet, error) { + args := o.Called(namespace) + return args.Get(0).([]v1beta2.StatefulSet), args.Error(1) +} + +func (o *K8SClientMock) GetTemplate(namespace, templateType, templateName string) (kubernetes.IstioObject, error) { + args := o.Called(namespace, templateType, templateName) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetTemplates(namespace string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetVirtualServices(namespace string, serviceName string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace, serviceName) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetVirtualService(namespace string, virtualservice string) (kubernetes.IstioObject, error) { + args := o.Called(namespace, virtualservice) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetPolicies(namespace string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetPolicy(namespace string, policyName string) (kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetMeshPolicies(namespace string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o 
*K8SClientMock) GetMeshPolicy(namespace string, policyName string) (kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetClusterRbacConfigs(namespace string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetClusterRbacConfig(namespace string, policyName string) (kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetServiceRoles(namespace string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetServiceRole(namespace string, policyName string) (kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetServiceRoleBindings(namespace string) ([]kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).([]kubernetes.IstioObject), args.Error(1) +} + +func (o *K8SClientMock) GetServiceRoleBinding(namespace string, policyName string) (kubernetes.IstioObject, error) { + args := o.Called(namespace) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} +func (o *K8SClientMock) IsOpenShift() bool { + args := o.Called() + return args.Get(0).(bool) +} + +func (o *K8SClientMock) Stop() { +} + +func (o *K8SClientMock) UpdateIstioObject(api, namespace, resourceType, name, jsonPatch string) (kubernetes.IstioObject, error) { + args := o.Called(api, namespace, resourceType, name, jsonPatch) + return args.Get(0).(kubernetes.IstioObject), args.Error(1) +} + +// Fake methods doesn't need an entry point + +func FakeService() *v1.Service { + return &v1.Service{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin", + Namespace: "tutorial", + Labels: map[string]string{ + "app": "httpbin", + "version": "v1"}}, + Spec: v1.ServiceSpec{ + ClusterIP: "fromservice", + Type: "ClusterIP", + Selector: map[string]string{"app": "httpbin"}, + Ports: []v1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 3001}, + { + Name: "http", + Protocol: "TCP", + Port: 3000}}}} +} + +func FakeServiceList() []v1.Service { + return []v1.Service{ + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "reviews", + Namespace: "tutorial", + Labels: map[string]string{ + "app": "reviews", + "version": "v1"}}, + Spec: v1.ServiceSpec{ + ClusterIP: "fromservice", + Type: "ClusterIP", + Selector: map[string]string{"app": "reviews"}, + Ports: []v1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 3001}, + { + Name: "http", + Protocol: "TCP", + Port: 3000}}}}, + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin", + Namespace: "tutorial", + Labels: map[string]string{ + "app": "httpbin", + "version": "v1"}}, + Spec: v1.ServiceSpec{ + ClusterIP: "fromservice", + Type: "ClusterIP", + Selector: map[string]string{"app": "httpbin"}, + Ports: []v1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 3001}, + { + Name: "http", + Protocol: "TCP", + Port: 3000}}}}, + } +} + +func FakePodListWithoutSidecar() []v1.Pod { + return []v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "reviews-v1", + Labels: map[string]string{"app": "reviews", "version": "v1"}}}, + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "reviews-v2", + Labels: map[string]string{"app": "reviews", "version": "v2"}}}, + { + 
ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v1", + Labels: map[string]string{"app": "httpbin", "version": "v1"}}}, + } +} + +func FakePodList() []v1.Pod { + return []v1.Pod{ + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "reviews-v1", + Labels: map[string]string{"app": "reviews", "version": "v1"}, + Annotations: FakeIstioAnnotations(), + }, + }, + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "reviews-v2", + Labels: map[string]string{"app": "reviews", "version": "v2"}, + Annotations: FakeIstioAnnotations(), + }, + }, + { + ObjectMeta: meta_v1.ObjectMeta{ + Name: "httpbin-v1", + Labels: map[string]string{"app": "httpbin", "version": "v1"}, + Annotations: FakeIstioAnnotations(), + }, + }, + } +} + +func FakeIstioAnnotations() map[string]string { + return map[string]string{"sidecar.istio.io/status": "{\"version\":\"\",\"initContainers\":[\"istio-init\",\"enable-core-dump\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"]}"} +} + +func FakeNamespace(name string) *v1.Namespace { + return &v1.Namespace{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: name, + }, + } +} diff --git a/vendor/github.com/kiali/kiali/kubernetes/types.go b/vendor/github.com/kiali/kiali/kubernetes/types.go new file mode 100644 index 000000000..70bdc3f8a --- /dev/null +++ b/vendor/github.com/kiali/kiali/kubernetes/types.go @@ -0,0 +1,684 @@ +package kubernetes + +import ( + "fmt" + "strings" + + "github.com/kiali/kiali/config" + "k8s.io/api/apps/v1beta1" + autoscalingV1 "k8s.io/api/autoscaling/v1" + v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + // Networking + + destinationRules = "destinationrules" + destinationRuleType = "DestinationRule" + destinationRuleTypeList = "DestinationRuleList" + + gateways = "gateways" + gatewayType = "Gateway" + gatewayTypeList = "GatewayList" + + serviceentries = "serviceentries" + serviceentryType = "ServiceEntry" + serviceentryTypeList = "ServiceEntryList" + + virtualServices = "virtualservices" + virtualServiceType = "VirtualService" + virtualServiceTypeList = "VirtualServiceList" + + // Quotas + + quotaspecs = "quotaspecs" + quotaspecType = "QuotaSpec" + quotaspecTypeList = "QuotaSpecList" + + quotaspecbindings = "quotaspecbindings" + quotaspecbindingType = "QuotaSpecBinding" + quotaspecbindingTypeList = "QuotaSpecBindingList" + + // Policies + + policies = "policies" + policyType = "Policy" + policyTypeList = "PolicyList" + + //MeshPolicies + + meshPolicies = "meshpolicies" + meshPolicyType = "MeshPolicy" + meshPolicyTypeList = "MeshPolicyList" + + // Rbac + clusterrbacconfigs = "clusterrbacconfigs" + clusterrbacconfigType = "ClusterRbacConfig" + clusterrbacconfigTypeList = "ClusterRbacConfigList" + + serviceroles = "serviceroles" + serviceroleType = "ServiceRole" + serviceroleTypeList = "ServiceRoleList" + + servicerolebindings = "servicerolebindings" + servicerolebindingType = "ServiceRoleBinding" + servicerolebindingTypeList = "ServiceRoleBindingList" + + // Config - Rules + + rules = "rules" + ruleType = "rule" + ruleTypeList = "ruleList" + + // Config - Adapters + + circonuses = "circonuses" + circonusType = "circonus" + circonusTypeList = "circonusList" + + deniers = "deniers" + denierType = "denier" + denierTypeList = "denierList" + + fluentds = "fluentds" + fluentdType = "fluentd" + fluentdTypeList = "fluentdList" + fluentdLabel = "fluentd" + + handlers = "handlers" + handlerType = "handler" + handlerTypeList = "handlerList" + + 
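// Editor's note (illustrative sketch; not part of the vendored kiali sources):
// the K8SClientMock introduced earlier in this diff follows the stretchr/testify
// mock pattern that its methods expose (On/Return to program expectations,
// Called to replay them), and NewK8SClientMock pre-stubs IsOpenShift to true.
// A minimal test driving it could look like the sketch below; the "kubetest"
// import path, the test name, and the "bookinfo" namespace are assumptions made
// for illustration only.
//
//	import (
//		"testing"
//
//		"github.com/stretchr/testify/assert"
//
//		"github.com/kiali/kiali/kubernetes/kubetest" // assumed location of K8SClientMock
//	)
//
//	func TestEmptyNamespaceWorkloads(t *testing.T) {
//		k8s := kubetest.NewK8SClientMock()
//		k8s.MockEmptyWorkloads("bookinfo") // every workload getter now returns an empty slice
//		k8s.On("GetServices", "bookinfo", map[string]string(nil)).
//			Return(kubetest.FakeServiceList(), nil)
//
//		deployments, err := k8s.GetDeployments("bookinfo")
//		assert.NoError(t, err)
//		assert.Empty(t, deployments)
//
//		services, err := k8s.GetServices("bookinfo", nil)
//		assert.NoError(t, err)
//		assert.Len(t, services, 2) // FakeServiceList defines "reviews" and "httpbin"
//	}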
kubernetesenvs = "kubernetesenvs" + kubernetesenvType = "kubernetesenv" + kubernetesenvTypeList = "kubernetesenvList" + + listcheckers = "listcheckers" + listcheckerType = "listchecker" + listcheckerTypeList = "listcheckerList" + + memquotas = "memquotas" + memquotaType = "memquota" + memquotaTypeList = "memquotaList" + + opas = "opas" + opaType = "opa" + opaTypeList = "opaList" + + prometheuses = "prometheuses" + prometheusType = "prometheus" + prometheusTypeList = "prometheusList" + + rbacs = "rbacs" + rbacType = "rbac" + rbacTypeList = "rbacList" + + servicecontrols = "servicecontrols" + servicecontrolType = "servicecontrol" + servicecontrolTypeList = "servicecontrolList" + + solarwindses = "solarwindses" + solarwindsType = "solarwinds" + solarwindsTypeList = "solarwindsList" + + stackdrivers = "stackdrivers" + stackdriverType = "stackdriver" + stackdriverTypeList = "stackdriverList" + + statsds = "statsds" + statsdType = "statsd" + statsdTypeList = "statsdList" + + stdios = "stdios" + stdioType = "stdio" + stdioTypeList = "stdioList" + + // Config - Templates + + apikeys = "apikeys" + apikeyType = "apikey" + apikeyTypeList = "apikeyList" + + authorizations = "authorizations" + authorizationType = "authorization" + authorizationTypeList = "authorizationList" + + checknothings = "checknothings" + checknothingType = "checknothing" + checknothingTypeList = "checknothingList" + + kuberneteses = "kuberneteses" + kubernetesType = "kubernetes" + kubernetesTypeList = "kubernetesList" + + listEntries = "listentries" + listEntryType = "listentry" + listEntryTypeList = "listentryList" + + logentries = "logentries" + logentryType = "logentry" + logentryTypeList = "logentryList" + + metrics = "metrics" + metricType = "metric" + metricTypeList = "metricList" + + quotas = "quotas" + quotaType = "quota" + quotaTypeList = "quotaList" + + reportnothings = "reportnothings" + reportnothingType = "reportnothing" + reportnothingTypeList = "reportnothingList" + + servicecontrolreports = "servicecontrolreports" + servicecontrolreportType = "servicecontrolreport" + servicecontrolreportTypeList = "servicecontrolreportList" +) + +var ( + ConfigGroupVersion = schema.GroupVersion{ + Group: "config.istio.io", + Version: "v1alpha2", + } + ApiConfigVersion = ConfigGroupVersion.Group + "/" + ConfigGroupVersion.Version + + NetworkingGroupVersion = schema.GroupVersion{ + Group: "networking.istio.io", + Version: "v1alpha3", + } + ApiNetworkingVersion = NetworkingGroupVersion.Group + "/" + NetworkingGroupVersion.Version + + AuthenticationGroupVersion = schema.GroupVersion{ + Group: "authentication.istio.io", + Version: "v1alpha1", + } + ApiAuthenticationVersion = AuthenticationGroupVersion.Group + "/" + AuthenticationGroupVersion.Version + + RbacGroupVersion = schema.GroupVersion{ + Group: "rbac.istio.io", + Version: "v1alpha1", + } + ApiRbacVersion = RbacGroupVersion.Group + "/" + RbacGroupVersion.Version + + networkingTypes = []struct { + objectKind string + collectionKind string + }{ + { + objectKind: gatewayType, + collectionKind: gatewayTypeList, + }, + { + objectKind: virtualServiceType, + collectionKind: virtualServiceTypeList, + }, + { + objectKind: destinationRuleType, + collectionKind: destinationRuleTypeList, + }, + { + objectKind: serviceentryType, + collectionKind: serviceentryTypeList, + }, + } + + configTypes = []struct { + objectKind string + collectionKind string + }{ + { + objectKind: ruleType, + collectionKind: ruleTypeList, + }, + // Quota specs depends on Quota template but are not a "template" object 
itselft + { + objectKind: quotaspecType, + collectionKind: quotaspecTypeList, + }, + { + objectKind: quotaspecbindingType, + collectionKind: quotaspecbindingTypeList, + }, + } + + authenticationTypes = []struct { + objectKind string + collectionKind string + }{ + { + objectKind: policyType, + collectionKind: policyTypeList, + }, + { + objectKind: meshPolicyType, + collectionKind: meshPolicyTypeList, + }, + } + + // TODO Adapters and Templates can be loaded from external config for easy maintenance + + adapterTypes = []struct { + objectKind string + collectionKind string + }{ + { + objectKind: circonusType, + collectionKind: circonusTypeList, + }, + { + objectKind: denierType, + collectionKind: denierTypeList, + }, + { + objectKind: fluentdType, + collectionKind: fluentdTypeList, + }, + { + objectKind: handlerType, + collectionKind: handlerTypeList, + }, + { + objectKind: kubernetesenvType, + collectionKind: kubernetesenvTypeList, + }, + { + objectKind: listcheckerType, + collectionKind: listcheckerTypeList, + }, + { + objectKind: memquotaType, + collectionKind: memquotaTypeList, + }, + { + objectKind: opaType, + collectionKind: opaTypeList, + }, + { + objectKind: prometheusType, + collectionKind: prometheusTypeList, + }, + { + objectKind: rbacType, + collectionKind: rbacTypeList, + }, + { + objectKind: servicecontrolType, + collectionKind: servicecontrolTypeList, + }, + { + objectKind: solarwindsType, + collectionKind: solarwindsTypeList, + }, + { + objectKind: stackdriverType, + collectionKind: stackdriverTypeList, + }, + { + objectKind: statsdType, + collectionKind: statsdTypeList, + }, + { + objectKind: stdioType, + collectionKind: stdioTypeList, + }, + } + + templateTypes = []struct { + objectKind string + collectionKind string + }{ + { + objectKind: apikeyType, + collectionKind: apikeyTypeList, + }, + { + objectKind: authorizationType, + collectionKind: authorizationTypeList, + }, + { + objectKind: checknothingType, + collectionKind: checknothingTypeList, + }, + { + objectKind: kubernetesType, + collectionKind: kubernetesTypeList, + }, + { + objectKind: listEntryType, + collectionKind: listEntryTypeList, + }, + { + objectKind: logentryType, + collectionKind: logentryTypeList, + }, + { + objectKind: metricType, + collectionKind: metricTypeList, + }, + { + objectKind: quotaType, + collectionKind: quotaTypeList, + }, + { + objectKind: reportnothingType, + collectionKind: reportnothingTypeList, + }, + { + objectKind: servicecontrolreportType, + collectionKind: servicecontrolreportTypeList, + }, + } + + rbacTypes = []struct { + objectKind string + collectionKind string + }{ + { + objectKind: clusterrbacconfigType, + collectionKind: clusterrbacconfigTypeList, + }, + { + objectKind: serviceroleType, + collectionKind: serviceroleTypeList, + }, + { + objectKind: servicerolebindingType, + collectionKind: servicerolebindingTypeList, + }, + } + + // A map to get the plural for a Istio type using the singlar type + // Used for fetch istio actions details, so only applied to handlers (adapters) and instances (templates) types + // It should be one entry per adapter/template + adapterPlurals = map[string]string{ + circonusType: circonuses, + denierType: deniers, + fluentdType: fluentds, + handlerType: handlers, + kubernetesenvType: kubernetesenvs, + listcheckerType: listcheckers, + memquotaType: memquotas, + opaType: opas, + prometheusType: prometheuses, + rbacType: rbacs, + servicecontrolType: servicecontrols, + solarwindsType: solarwindses, + stackdriverType: stackdrivers, + statsdType: statsds, + 
stdioType: stdios, + } + + templatePlurals = map[string]string{ + apikeyType: apikeys, + authorizationType: authorizations, + checknothingType: checknothings, + kubernetesType: kuberneteses, + listEntryType: listEntries, + logentryType: logentries, + metricType: metrics, + quotaType: quotas, + reportnothingType: reportnothings, + servicecontrolreportType: servicecontrolreports, + } + + PluralType = map[string]string{ + // Networking + gateways: gatewayType, + virtualServices: virtualServiceType, + destinationRules: destinationRuleType, + serviceentries: serviceentryType, + + // Main Config files + rules: ruleType, + quotaspecs: quotaspecType, + quotaspecbindings: quotaspecbindingType, + + // Adapters + circonuses: circonusType, + deniers: denierType, + fluentds: fluentdType, + handlers: handlerType, + kubernetesenvs: kubernetesenvType, + listcheckers: listcheckerType, + memquotas: memquotaType, + opas: opaType, + prometheuses: prometheusType, + rbacs: rbacType, + servicecontrols: servicecontrolType, + solarwindses: solarwindsType, + stackdrivers: stackdriverType, + statsds: statsdType, + stdios: stdioType, + + // Templates + apikeys: apikeyType, + authorizations: authorizationType, + checknothings: checknothingType, + kuberneteses: kubernetesType, + listEntries: listEntryType, + logentries: logentryType, + metrics: metricType, + quotas: quotaType, + reportnothings: reportnothingType, + servicecontrolreports: servicecontrolreportType, + + // Policies + policies: policyType, + meshPolicies: meshPolicyType, + + // Rbac + clusterrbacconfigs: clusterrbacconfigType, + serviceroles: serviceroleType, + servicerolebindings: servicerolebindingType, + } +) + +// IstioObject is a k8s wrapper interface for config objects. +// Taken from istio.io +type IstioObject interface { + runtime.Object + GetSpec() map[string]interface{} + SetSpec(map[string]interface{}) + GetObjectMeta() meta_v1.ObjectMeta + SetObjectMeta(meta_v1.ObjectMeta) + DeepCopyIstioObject() IstioObject +} + +// IstioObjectList is a k8s wrapper interface for list config objects. +// Taken from istio.io +type IstioObjectList interface { + runtime.Object + GetItems() []IstioObject +} + +// ServiceList holds list of services, pods and deployments +type ServiceList struct { + Services *v1.ServiceList + Pods *v1.PodList + Deployments *v1beta1.DeploymentList +} + +// ServiceDetails is a wrapper to group full Service description, Endpoints and Pods. +// Used to fetch all details in a single operation instead to invoke individual APIs per each group. +type ServiceDetails struct { + Service *v1.Service `json:"service"` + Endpoints *v1.Endpoints `json:"endpoints"` + Deployments *v1beta1.DeploymentList `json:"deployments"` + Autoscalers *autoscalingV1.HorizontalPodAutoscalerList `json:"autoscalers"` + Pods []v1.Pod `json:"pods"` +} + +// IstioDetails is a wrapper to group all Istio objects related to a Service. +// Used to fetch all Istio information in a single operation instead to invoke individual APIs per each group. 
+type IstioDetails struct { + VirtualServices []IstioObject `json:"virtualservices"` + DestinationRules []IstioObject `json:"destinationrules"` + ServiceEntries []IstioObject `json:"serviceentries"` + Gateways []IstioObject `json:"gateways"` +} + +// MTLSDetails is a wrapper to group all Istio objects related to non-local mTLS configurations +type MTLSDetails struct { + DestinationRules []IstioObject `json:"destinationrules"` + MeshPolicies []IstioObject `json:"meshpolicies"` +} + +type istioResponse struct { + result IstioObject + results []IstioObject + err error +} + +// GenericIstioObject is a type to test Istio types defined by Istio as a Kubernetes extension. +type GenericIstioObject struct { + meta_v1.TypeMeta `json:",inline"` + meta_v1.ObjectMeta `json:"metadata"` + Spec map[string]interface{} `json:"spec"` +} + +// GenericIstioObjectList is the generic Kubernetes API list wrapper +type GenericIstioObjectList struct { + meta_v1.TypeMeta `json:",inline"` + meta_v1.ListMeta `json:"metadata"` + Items []GenericIstioObject `json:"items"` +} + +// GetSpec from a wrapper +func (in *GenericIstioObject) GetSpec() map[string]interface{} { + return in.Spec +} + +// SetSpec for a wrapper +func (in *GenericIstioObject) SetSpec(spec map[string]interface{}) { + in.Spec = spec +} + +// GetObjectMeta from a wrapper +func (in *GenericIstioObject) GetObjectMeta() meta_v1.ObjectMeta { + return in.ObjectMeta +} + +// SetObjectMeta for a wrapper +func (in *GenericIstioObject) SetObjectMeta(metadata meta_v1.ObjectMeta) { + in.ObjectMeta = metadata +} + +// GetItems from a wrapper +func (in *GenericIstioObjectList) GetItems() []IstioObject { + out := make([]IstioObject, len(in.Items)) + for i := range in.Items { + out[i] = &in.Items[i] + } + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericIstioObject) DeepCopyInto(out *GenericIstioObject) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericIstioObject. +func (in *GenericIstioObject) DeepCopy() *GenericIstioObject { + if in == nil { + return nil + } + out := new(GenericIstioObject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GenericIstioObject) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyIstioObject is an autogenerated deepcopy function, copying the receiver, creating a new IstioObject. +func (in *GenericIstioObject) DeepCopyIstioObject() IstioObject { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericIstioObjectList) DeepCopyInto(out *GenericIstioObjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GenericIstioObject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericIstioObjectList. 
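// Editor's note (illustrative sketch; not part of the vendored kiali sources):
// GenericIstioObject, defined above, is the concrete type behind the IstioObject
// interface: its embedded ObjectMeta plus the free-form Spec map back
// GetObjectMeta/GetSpec, and the DeepCopy* methods satisfy runtime.Object.
// A test fixture can therefore be built from a plain map; the names and the
// selector value below are invented, and the "servers"/"selector" keys mirror
// the Gateway fields read by the models package later in this diff.
//
//	var gw = &GenericIstioObject{
//		ObjectMeta: meta_v1.ObjectMeta{Name: "mygateway", Namespace: "bookinfo"},
//		Spec: map[string]interface{}{
//			"servers":  []interface{}{},
//			"selector": map[string]interface{}{"istio": "ingressgateway"},
//		},
//	}
//
//	// *GenericIstioObject satisfies the IstioObject interface declared above.
//	var obj IstioObject = gw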
+func (in *GenericIstioObjectList) DeepCopy() *GenericIstioObjectList { + if in == nil { + return nil + } + out := new(GenericIstioObjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GenericIstioObjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// Host represents the FQDN format for Istio hostnames +type Host struct { + Service string + Namespace string + Cluster string +} + +// Parse takes as an input a hostname (simple or full FQDN), namespace and clusterName and returns a parsed Host struct +func ParseHost(hostName, namespace, cluster string) Host { + domainParts := strings.Split(hostName, ".") + host := Host{ + Service: domainParts[0], + } + if len(domainParts) > 1 { + host.Namespace = domainParts[1] + + if len(domainParts) > 2 { + host.Cluster = strings.Join(domainParts[2:], ".") + } + } + + // Fill in missing details, we take precedence from the full hostname and not from DestinationRule details + if host.Cluster == "" { + if cluster != "" { + host.Cluster = cluster + } else { + host.Cluster = config.Get().ExternalServices.Istio.IstioIdentityDomain + } + } + + if host.Namespace == "" { + host.Namespace = namespace + } + return host +} + +// String outputs a full FQDN version of the Host +func (h Host) String() string { + return fmt.Sprintf("%s.%s.%s", h.Service, h.Namespace, h.Cluster) +} diff --git a/vendor/github.com/kiali/kiali/log/log.go b/vendor/github.com/kiali/kiali/log/log.go new file mode 100644 index 000000000..5be4db5f6 --- /dev/null +++ b/vendor/github.com/kiali/kiali/log/log.go @@ -0,0 +1,72 @@ +package log + +import ( + "fmt" + + "github.com/golang/glog" +) + +const ( + debug glog.Level = glog.Level(4) + trace glog.Level = glog.Level(5) +) + +func Info(args ...interface{}) { + glog.InfoDepth(1, args...) +} + +func Infof(format string, args ...interface{}) { + glog.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func Warning(args ...interface{}) { + glog.WarningDepth(1, args...) +} + +func Warningf(format string, args ...interface{}) { + glog.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func Error(args ...interface{}) { + glog.ErrorDepth(1, args...) 
+} + +func Errorf(format string, args ...interface{}) { + glog.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// Debug will log a message at verbose level 4 and will ensure the caller's stack frame is used +func Debug(args ...interface{}) { + if glog.V(debug) { + glog.InfoDepth(1, "DEBUG: "+fmt.Sprint(args...)) // 1 == depth in the stack of the caller + } +} + +// Debugf will log a message at verbose level 4 and will ensure the caller's stack frame is used +func Debugf(format string, args ...interface{}) { + if glog.V(debug) { + glog.InfoDepth(1, fmt.Sprintf("DEBUG: "+format, args...)) // 1 == depth in the stack of the caller + } +} + +func IsDebug() bool { + return bool(glog.V(debug)) +} + +// Trace will log a message at verbose level 5 and will ensure the caller's stack frame is used +func Trace(args ...interface{}) { + if glog.V(trace) { + glog.InfoDepth(1, "TRACE: "+fmt.Sprint(args...)) // 1 == depth in the stack of the caller + } +} + +// Tracef will log a message at verbose level 5 and will ensure the caller's stack frame is used +func Tracef(format string, args ...interface{}) { + if glog.V(trace) { + glog.InfoDepth(1, fmt.Sprintf("TRACE: "+format, args...)) // 1 == depth in the stack of the caller + } +} + +func IsTrace() bool { + return bool(glog.V(trace)) +} diff --git a/vendor/github.com/kiali/kiali/models/address.go b/vendor/github.com/kiali/kiali/models/address.go new file mode 100644 index 000000000..8c0735fbf --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/address.go @@ -0,0 +1,27 @@ +package models + +import "k8s.io/api/core/v1" + +type Addresses []Address +type Address struct { + Kind string `json:"kind"` + Name string `json:"name"` + IP string `json:"ip"` +} + +func (addresses *Addresses) Parse(as []v1.EndpointAddress) { + for _, address := range as { + castedAddress := Address{} + castedAddress.Parse(address) + *addresses = append(*addresses, castedAddress) + } +} + +func (address *Address) Parse(a v1.EndpointAddress) { + address.IP = a.IP + + if a.TargetRef != nil { + address.Kind = a.TargetRef.Kind + address.Name = a.TargetRef.Name + } +} diff --git a/vendor/github.com/kiali/kiali/models/app.go b/vendor/github.com/kiali/kiali/models/app.go new file mode 100644 index 000000000..60b0bede0 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/app.go @@ -0,0 +1,60 @@ +package models + +type AppList struct { + // Namespace where the apps live in + // required: true + // example: bookinfo + Namespace Namespace `json:"namespace"` + + // Applications for a given namespace + // required: true + Apps []AppListItem `json:"applications"` +} + +// AppListItem has the necessary information to display the console app list +type AppListItem struct { + // Name of the application + // required: true + // example: reviews + Name string `json:"name"` + + // Define if all Pods related to the Workloads of this app has an IstioSidecar deployed + // required: true + // example: true + IstioSidecar bool `json:"istioSidecar"` +} + +type WorkloadItem struct { + // Name of a workload member of an application + // required: true + // example: reviews-v1 + WorkloadName string `json:"workloadName"` + + // Define if all Pods related to the Workload has an IstioSidecar deployed + // required: true + // example: true + IstioSidecar bool `json:"istioSidecar"` +} + +type App struct { + // Namespace where the app lives in + // required: true + // example: bookinfo + Namespace Namespace `json:"namespace"` + + // Name of the application + // required: true + // example: reviews + Name string 
`json:"name"` + + // Workloads for a given application + // required: true + Workloads []WorkloadItem `json:"workloads"` + + // List of service names linked with an application + // required: true + ServiceNames []string `json:"serviceNames"` + + // Runtimes and associated dashboards + Runtimes []Runtime `json:"runtimes"` +} diff --git a/vendor/github.com/kiali/kiali/models/autoscalers.go b/vendor/github.com/kiali/kiali/models/autoscalers.go new file mode 100644 index 000000000..f7d31514d --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/autoscalers.go @@ -0,0 +1,54 @@ +package models + +import ( + "k8s.io/api/autoscaling/v1" +) + +type Autoscaler struct { + Name string `json:"name"` + Labels map[string]string `json:"labels"` + CreatedAt string `json:"createdAt"` + // Spec + MinReplicas int32 `json:"minReplicas"` + MaxReplicas int32 `json:"maxReplicas"` + TargetCPUUtilizationPercentage int32 `json:"targetCPUUtilizationPercentage"` + // Status + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + LastScaleTime string `json:"lastScaleTime,omitempty"` + CurrentReplicas int32 `json:"currentReplicas"` + DesiredReplicas int32 `json:"desiredReplicas"` + CurrentCPUUtilizationPercentage int32 `json:"currentCPUUtilizationPercentage,omitempty"` +} + +func (autoscaler *Autoscaler) Parse(d *v1.HorizontalPodAutoscaler) { + autoscaler.Name = d.Name + autoscaler.Labels = d.Labels + autoscaler.CreatedAt = formatTime(d.CreationTimestamp.Time) + + // Spec + autoscaler.MaxReplicas = d.Spec.MaxReplicas + + if d.Spec.MinReplicas != nil { + autoscaler.MinReplicas = *d.Spec.MinReplicas + } + + if d.Spec.TargetCPUUtilizationPercentage != nil { + autoscaler.TargetCPUUtilizationPercentage = *d.Spec.TargetCPUUtilizationPercentage + } + + // Status + autoscaler.CurrentReplicas = d.Status.CurrentReplicas + autoscaler.DesiredReplicas = d.Status.DesiredReplicas + + if d.Status.ObservedGeneration != nil { + autoscaler.ObservedGeneration = *d.Status.ObservedGeneration + } + + if d.Status.LastScaleTime != nil { + autoscaler.LastScaleTime = formatTime((*d.Status.LastScaleTime).Time) + } + + if d.Status.CurrentCPUUtilizationPercentage != nil { + autoscaler.CurrentCPUUtilizationPercentage = *d.Status.CurrentCPUUtilizationPercentage + } +} diff --git a/vendor/github.com/kiali/kiali/models/cluster_rbac_config.go b/vendor/github.com/kiali/kiali/models/cluster_rbac_config.go new file mode 100644 index 000000000..2f3ca47e6 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/cluster_rbac_config.go @@ -0,0 +1,32 @@ +package models + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/kubernetes" +) + +type ClusterRbacConfigs []ClusterRbacConfig +type ClusterRbacConfig struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + Mode interface{} `json:"mode"` + Inclusion interface{} `json:"inclusion"` + Exclusion interface{} `json:"exclusion"` + } `json:"spec"` +} + +func (rcs *ClusterRbacConfigs) Parse(clusterRbacConfigs []kubernetes.IstioObject) { + for _, rc := range clusterRbacConfigs { + clusterRbacConfig := ClusterRbacConfig{} + clusterRbacConfig.Parse(rc) + *rcs = append(*rcs, clusterRbacConfig) + } +} + +func (rc *ClusterRbacConfig) Parse(clusterRbacConfig kubernetes.IstioObject) { + rc.Metadata = clusterRbacConfig.GetObjectMeta() + rc.Spec.Mode = clusterRbacConfig.GetSpec()["mode"] + rc.Spec.Inclusion = clusterRbacConfig.GetSpec()["inclusion"] + rc.Spec.Exclusion = clusterRbacConfig.GetSpec()["exclusion"] +} diff --git 
a/vendor/github.com/kiali/kiali/models/dashboards.go b/vendor/github.com/kiali/kiali/models/dashboards.go new file mode 100644 index 000000000..b5d6100ac --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/dashboards.go @@ -0,0 +1,100 @@ +package models + +import ( + "fmt" + "sort" + + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/prometheus" +) + +// MonitoringDashboard is the model representing custom monitoring dashboard, transformed from MonitoringDashboard k8s resource +type MonitoringDashboard struct { + Title string `json:"title"` + Charts []Chart `json:"charts"` + Aggregations []Aggregation `json:"aggregations"` +} + +// Chart is the model representing a custom chart, transformed from charts in MonitoringDashboard k8s resource +type Chart struct { + Name string `json:"name"` + Unit string `json:"unit"` + Spans int `json:"spans"` + Metric *prometheus.Metric `json:"metric"` + Histogram prometheus.Histogram `json:"histogram"` +} + +// ConvertChart converts a k8s chart (from MonitoringDashboard k8s resource) into this models chart +func ConvertChart(from kubernetes.MonitoringDashboardChart) Chart { + return Chart{ + Name: from.Name, + Unit: from.Unit, + Spans: from.Spans, + } +} + +// Aggregation is the model representing label's allowed aggregation, transformed from aggregation in MonitoringDashboard k8s resource +type Aggregation struct { + Label string `json:"label"` + DisplayName string `json:"displayName"` +} + +// ConvertAggregations converts a k8s aggregations (from MonitoringDashboard k8s resource) into this models aggregations +// Results are sorted by DisplayName +func ConvertAggregations(from kubernetes.MonitoringDashboardSpec) []Aggregation { + uniqueAggs := make(map[string]Aggregation) + for _, chart := range from.Charts { + for _, agg := range chart.Aggregations { + uniqueAggs[agg.DisplayName] = Aggregation{Label: agg.Label, DisplayName: agg.DisplayName} + } + } + aggs := []Aggregation{} + for _, agg := range uniqueAggs { + aggs = append(aggs, agg) + } + sort.Slice(aggs, func(i, j int) bool { + return aggs[i].DisplayName < aggs[j].DisplayName + }) + return aggs +} + +func buildIstioAggregations(local, remote string) []Aggregation { + return []Aggregation{ + { + Label: fmt.Sprintf("%s_version", local), + DisplayName: "Local version", + }, + { + Label: fmt.Sprintf("%s_app", remote), + DisplayName: "Remote app", + }, + { + Label: fmt.Sprintf("%s_version", remote), + DisplayName: "Remote version", + }, + { + Label: "response_code", + DisplayName: "Response code", + }, + } +} + +// PrepareIstioDashboard prepares the Istio dashboard title and aggregations dynamically for input values +func PrepareIstioDashboard(direction, local, remote string) MonitoringDashboard { + return MonitoringDashboard{ + Title: fmt.Sprintf("%s Metrics", direction), + Aggregations: buildIstioAggregations(local, remote), + } +} + +// Runtime holds the runtime title and associated dashboard template(s) +type Runtime struct { + Name string `json:"name"` + DashboardRefs []DashboardRef `json:"dashboardRefs"` +} + +// DashboardRef holds template name and title for a custom dashboard +type DashboardRef struct { + Template string `json:"template"` + Title string `json:"title"` +} diff --git a/vendor/github.com/kiali/kiali/models/destination_rule.go b/vendor/github.com/kiali/kiali/models/destination_rule.go new file mode 100644 index 000000000..8355fa69d --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/destination_rule.go @@ -0,0 +1,95 @@ +package models + +import ( + meta_v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" +) + +// DestinationRules destinationRules +// +// This is used for returning an array of DestinationRules +// +// swagger:model destinationRules +// An array of destinationRule +// swagger:allOf +type DestinationRules struct { + Permissions ResourcePermissions `json:"permissions"` + Items []DestinationRule `json:"items"` +} + +// DestinationRule destinationRule +// +// This is used for returning a DestinationRule +// +// swagger:model destinationRule +type DestinationRule struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + Host interface{} `json:"host"` + TrafficPolicy interface{} `json:"trafficPolicy"` + Subsets interface{} `json:"subsets"` + } `json:"spec"` +} + +func (dRules *DestinationRules) Parse(destinationRules []kubernetes.IstioObject) { + dRules.Items = []DestinationRule{} + for _, dr := range destinationRules { + destinationRule := DestinationRule{} + destinationRule.Parse(dr) + dRules.Items = append(dRules.Items, destinationRule) + } +} + +func (dRule *DestinationRule) Parse(destinationRule kubernetes.IstioObject) { + dRule.Metadata = destinationRule.GetObjectMeta() + dRule.Spec.Host = destinationRule.GetSpec()["host"] + dRule.Spec.TrafficPolicy = destinationRule.GetSpec()["trafficPolicy"] + dRule.Spec.Subsets = destinationRule.GetSpec()["subsets"] +} + +func (dRule *DestinationRule) HasCircuitBreaker(namespace string, serviceName string, version string) bool { + if host, ok := dRule.Spec.Host.(string); ok && kubernetes.FilterByHost(host, serviceName, namespace) { + // CB is set at DR level, so it's true for the service and all versions + if isCircuitBreakerTrafficPolicy(dRule.Spec.TrafficPolicy) { + return true + } + if subsets, ok := dRule.Spec.Subsets.([]interface{}); ok { + cfg := config.Get() + for _, subsetInterface := range subsets { + if subset, ok := subsetInterface.(map[string]interface{}); ok { + if trafficPolicy, ok := subset["trafficPolicy"]; ok && isCircuitBreakerTrafficPolicy(trafficPolicy) { + // set the service true if it has a subset with a CB + if "" == version { + return true + } + if labels, ok := subset["labels"]; ok { + if dLabels, ok := labels.(map[string]interface{}); ok { + if versionValue, ok := dLabels[cfg.IstioLabels.VersionLabelName]; ok && versionValue == version { + return true + } + } + } + } + } + } + } + } + return false +} + +func isCircuitBreakerTrafficPolicy(trafficPolicy interface{}) bool { + if trafficPolicy == nil { + return false + } + if dTrafficPolicy, ok := trafficPolicy.(map[string]interface{}); ok { + if _, ok := dTrafficPolicy["connectionPool"]; ok { + return true + } + if _, ok := dTrafficPolicy["outlierDetection"]; ok { + return true + } + } + return false +} diff --git a/vendor/github.com/kiali/kiali/models/endpoint.go b/vendor/github.com/kiali/kiali/models/endpoint.go new file mode 100644 index 000000000..059f9876a --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/endpoint.go @@ -0,0 +1,26 @@ +package models + +import "k8s.io/api/core/v1" + +type Endpoints []Endpoint +type Endpoint struct { + Addresses Addresses `json:"addresses"` + Ports Ports `json:"ports"` +} + +func (endpoints *Endpoints) Parse(es *v1.Endpoints) { + if es == nil { + return + } + + for _, subset := range es.Subsets { + endpoint := Endpoint{} + endpoint.Parse(subset) + *endpoints = append(*endpoints, endpoint) + } +} + +func (endpoint *Endpoint) Parse(s v1.EndpointSubset) { + 
(&endpoint.Ports).ParseEndpointPorts(s.Ports) + (&endpoint.Addresses).Parse(s.Addresses) +} diff --git a/vendor/github.com/kiali/kiali/models/gateway.go b/vendor/github.com/kiali/kiali/models/gateway.go new file mode 100644 index 000000000..1cf6d36a1 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/gateway.go @@ -0,0 +1,30 @@ +package models + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/kubernetes" +) + +type Gateways []Gateway +type Gateway struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + Servers interface{} `json:"servers"` + Selector interface{} `json:"selector"` + } `json:"spec"` +} + +func (gws *Gateways) Parse(gateways []kubernetes.IstioObject) { + for _, gw := range gateways { + gateway := Gateway{} + gateway.Parse(gw) + *gws = append(*gws, gateway) + } +} + +func (gw *Gateway) Parse(gateway kubernetes.IstioObject) { + gw.Metadata = gateway.GetObjectMeta() + gw.Spec.Servers = gateway.GetSpec()["servers"] + gw.Spec.Selector = gateway.GetSpec()["selector"] +} diff --git a/vendor/github.com/kiali/kiali/models/grafana_info.go b/vendor/github.com/kiali/kiali/models/grafana_info.go new file mode 100644 index 000000000..690ef4561 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/grafana_info.go @@ -0,0 +1,11 @@ +package models + +// GrafanaInfo provides information to access Grafana dashboards +type GrafanaInfo struct { + URL string `json:"url"` + ServiceDashboardPath string `json:"serviceDashboardPath"` + WorkloadDashboardPath string `json:"workloadDashboardPath"` + VarNamespace string `json:"varNamespace"` + VarService string `json:"varService"` + VarWorkload string `json:"varWorkload"` +} diff --git a/vendor/github.com/kiali/kiali/models/health.go b/vendor/github.com/kiali/kiali/models/health.go new file mode 100644 index 000000000..5b1176ef4 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/health.go @@ -0,0 +1,105 @@ +package models + +import ( + "github.com/prometheus/common/model" +) + +// NamespaceAppHealth is an alias of map of app name x health +type NamespaceAppHealth map[string]*AppHealth + +// NamespaceServiceHealth is an alias of map of service name x health +type NamespaceServiceHealth map[string]*ServiceHealth + +// NamespaceWorkloadHealth is an alias of map of workload name x health +type NamespaceWorkloadHealth map[string]*WorkloadHealth + +// ServiceHealth contains aggregated health from various sources, for a given service +type ServiceHealth struct { + Requests RequestHealth `json:"requests"` +} + +// AppHealth contains aggregated health from various sources, for a given app +type AppHealth struct { + WorkloadStatuses []WorkloadStatus `json:"workloadStatuses"` + Requests RequestHealth `json:"requests"` +} + +func NewEmptyRequestHealth() RequestHealth { + return RequestHealth{ErrorRatio: -1, InboundErrorRatio: -1, OutboundErrorRatio: -1} +} + +// EmptyAppHealth create an empty AppHealth +func EmptyAppHealth() AppHealth { + return AppHealth{ + WorkloadStatuses: []WorkloadStatus{}, + Requests: NewEmptyRequestHealth(), + } +} + +// EmptyServiceHealth create an empty ServiceHealth +func EmptyServiceHealth() ServiceHealth { + return ServiceHealth{ + Requests: NewEmptyRequestHealth(), + } +} + +// WorkloadHealth contains aggregated health from various sources, for a given workload +type WorkloadHealth struct { + WorkloadStatus WorkloadStatus `json:"workloadStatus"` + Requests RequestHealth `json:"requests"` +} + +// WorkloadStatus gives the available / total replicas in a deployment of 
a pod +type WorkloadStatus struct { + Name string `json:"name"` + Replicas int32 `json:"replicas"` + AvailableReplicas int32 `json:"available"` +} + +// RequestHealth holds several stats about recent request errors +type RequestHealth struct { + inboundErrorRate float64 + outboundErrorRate float64 + inboundRequestRate float64 + outboundRequestRate float64 + + ErrorRatio float64 `json:"errorRatio"` + InboundErrorRatio float64 `json:"inboundErrorRatio"` + OutboundErrorRatio float64 `json:"outboundErrorRatio"` +} + +// AggregateInbound adds the provided metric sample to internal inbound counters and updates error ratios +func (in *RequestHealth) AggregateInbound(sample *model.Sample) { + aggregate(sample, &in.inboundRequestRate, &in.inboundErrorRate, &in.InboundErrorRatio) + in.updateGlobalErrorRatio() +} + +// AggregateOutbound adds the provided metric sample to internal outbound counters and updates error ratios +func (in *RequestHealth) AggregateOutbound(sample *model.Sample) { + aggregate(sample, &in.outboundRequestRate, &in.outboundErrorRate, &in.OutboundErrorRatio) + in.updateGlobalErrorRatio() +} + +func (in *RequestHealth) updateGlobalErrorRatio() { + globalRequestRate := in.inboundRequestRate + in.outboundRequestRate + globalErrorRate := in.inboundErrorRate + in.outboundErrorRate + + if globalRequestRate == 0 { + in.ErrorRatio = -1 + } else { + in.ErrorRatio = globalErrorRate / globalRequestRate + } +} + +func aggregate(sample *model.Sample, requestRate, errorRate, errorRatio *float64) { + *requestRate += float64(sample.Value) + responseCode := sample.Metric["response_code"][0] + if responseCode == '5' || responseCode == '4' { + *errorRate += float64(sample.Value) + } + if *requestRate == 0 { + *errorRatio = -1 + } else { + *errorRatio = *errorRate / *requestRate + } +} diff --git a/vendor/github.com/kiali/kiali/models/istio_config.go b/vendor/github.com/kiali/kiali/models/istio_config.go new file mode 100644 index 000000000..5b40f465a --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/istio_config.go @@ -0,0 +1,57 @@ +package models + +// IstioConfigList istioConfigList +// +// This type is used for returning a response of IstioConfigList +// +// swagger:model IstioConfigList +type IstioConfigList struct { + // The namespace of istioConfiglist + // + // required: true + Namespace Namespace `json:"namespace"` + Gateways Gateways `json:"gateways"` + VirtualServices VirtualServices `json:"virtualServices"` + DestinationRules DestinationRules `json:"destinationRules"` + ServiceEntries ServiceEntries `json:"serviceEntries"` + Rules IstioRules `json:"rules"` + Adapters IstioAdapters `json:"adapters"` + Templates IstioTemplates `json:"templates"` + QuotaSpecs QuotaSpecs `json:"quotaSpecs"` + QuotaSpecBindings QuotaSpecBindings `json:"quotaSpecBindings"` + Policies Policies `json:"policies"` + MeshPolicies MeshPolicies `json:"meshPolicies"` + ClusterRbacConfigs ClusterRbacConfigs `json:"clusterRbacConfigs"` + ServiceRoles ServiceRoles `json:"serviceRoles"` + ServiceRoleBindings ServiceRoleBindings `json:"serviceRoleBindings"` + IstioValidations IstioValidations `json:"validations"` +} + +type IstioConfigDetails struct { + Namespace Namespace `json:"namespace"` + ObjectType string `json:"objectType"` + Gateway *Gateway `json:"gateway"` + VirtualService *VirtualService `json:"virtualService"` + DestinationRule *DestinationRule `json:"destinationRule"` + ServiceEntry *ServiceEntry `json:"serviceEntry"` + Rule *IstioRule `json:"rule"` + Adapter *IstioAdapter `json:"adapter"` + Template 
*IstioTemplate `json:"template"` + QuotaSpec *QuotaSpec `json:"quotaSpec"` + QuotaSpecBinding *QuotaSpecBinding `json:"quotaSpecBinding"` + Policy *Policy `json:"policy"` + MeshPolicy *MeshPolicy `json:"meshPolicy"` + ClusterRbacConfig *ClusterRbacConfig `json:"clusterRbacConfig"` + ServiceRole *ServiceRole `json:"serviceRole"` + ServiceRoleBinding *ServiceRoleBinding `json:"serviceRoleBinding"` + Permissions ResourcePermissions `json:"permissions"` + IstioValidation *IstioValidation `json:"validation"` +} + +// ResourcePermissions holds permission flags for an object type +// True means allowed. +type ResourcePermissions struct { + Create bool `json:"create"` + Update bool `json:"update"` + Delete bool `json:"delete"` +} diff --git a/vendor/github.com/kiali/kiali/models/istio_rule.go b/vendor/github.com/kiali/kiali/models/istio_rule.go new file mode 100644 index 000000000..6da6c9a80 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/istio_rule.go @@ -0,0 +1,128 @@ +package models + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/kubernetes" +) + +type IstioRuleList struct { + Namespace Namespace `json:"namespace"` + Rules []IstioRule `json:"rules"` +} + +// IstioRules istioRules +// +// This type type is used for returning an array of IstioRules +// +// swagger:model istioRules +// An array of istioRule +// swagger:allOf +type IstioRules []IstioRule + +// IstioRule istioRule +// +// This type type is used for returning a IstioRule +// +// swagger:model istioRule +type IstioRule struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + Match interface{} `json:"match"` + Actions interface{} `json:"actions"` + } `json:"spec"` +} + +// IstioAdapters istioAdapters +// +// This type type is used for returning an array of IstioAdapters +// +// swagger:model istioAdapters +// An array of istioAdapter +// swagger:allOf +type IstioAdapters []IstioAdapter + +// IstioAdapter istioAdapter +// +// This type type is used for returning a IstioAdapter +// +// swagger:model istioAdapter +type IstioAdapter struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec interface{} `json:"spec"` + Adapter string `json:"adapter"` + // We need to bring the plural to use it from the UI to build the API + Adapters string `json:"adapters"` +} + +// IstioTemplates istioTemplates +// +// This type type is used for returning an array of IstioTemplates +// +// swagger:model istioTemplates +// An array of istioTemplates +// swagger:allOf +type IstioTemplates []IstioTemplate + +// IstioTemplate istioTemplate +// +// This type type is used for returning a IstioTemplate +// +// swagger:model istioTemplate +type IstioTemplate struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec interface{} `json:"spec"` + Template string `json:"template"` + // We need to bring the plural to use it from the UI to build the API + Templates string `json:"templates"` +} + +func CastIstioRulesCollection(rules []kubernetes.IstioObject) IstioRules { + istioRules := make([]IstioRule, len(rules)) + for i, rule := range rules { + istioRules[i] = CastIstioRule(rule) + } + return istioRules +} + +func CastIstioRule(rule kubernetes.IstioObject) IstioRule { + istioRule := IstioRule{} + istioRule.Metadata = rule.GetObjectMeta() + istioRule.Spec.Match = rule.GetSpec()["match"] + istioRule.Spec.Actions = rule.GetSpec()["actions"] + return istioRule +} + +func CastIstioAdaptersCollection(adapters []kubernetes.IstioObject) IstioAdapters { + istioAdapters := make([]IstioAdapter, 
len(adapters)) + for i, adapter := range adapters { + istioAdapters[i] = CastIstioAdapter(adapter) + } + return istioAdapters +} + +func CastIstioAdapter(adapter kubernetes.IstioObject) IstioAdapter { + istioAdapter := IstioAdapter{} + istioAdapter.Metadata = adapter.GetObjectMeta() + istioAdapter.Spec = adapter.GetSpec() + istioAdapter.Adapter = adapter.GetObjectMeta().Labels["adapter"] + istioAdapter.Adapters = adapter.GetObjectMeta().Labels["adapters"] + return istioAdapter +} + +func CastIstioTemplatesCollection(templates []kubernetes.IstioObject) IstioTemplates { + istioTemplates := make([]IstioTemplate, len(templates)) + for i, template := range templates { + istioTemplates[i] = CastIstioTemplate(template) + } + return istioTemplates +} + +func CastIstioTemplate(template kubernetes.IstioObject) IstioTemplate { + istioTemplate := IstioTemplate{} + istioTemplate.Metadata = template.GetObjectMeta() + istioTemplate.Spec = template.GetSpec() + istioTemplate.Template = template.GetObjectMeta().Labels["template"] + istioTemplate.Templates = template.GetObjectMeta().Labels["templates"] + return istioTemplate +} diff --git a/vendor/github.com/kiali/kiali/models/istio_validation.go b/vendor/github.com/kiali/kiali/models/istio_validation.go new file mode 100644 index 000000000..d0eb3a544 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/istio_validation.go @@ -0,0 +1,208 @@ +package models + +import ( + "encoding/json" +) + +// NamespaceValidations represents a set of IstioValidations grouped by namespace +type NamespaceValidations map[string]IstioValidations + +// IstioValidationKey is the key value composed of an Istio ObjectType and Name. +type IstioValidationKey struct { + ObjectType string + Name string +} + +// IstioValidations represents a set of IstioValidation grouped by IstioValidationKey. +type IstioValidations map[IstioValidationKey]*IstioValidation + +// IstioValidation represents a list of checks associated to an Istio object. +// swagger:model +type IstioValidation struct { + // Name of the object itself + // required: true + // example: reviews + Name string `json:"name"` + + // Type of the object + // required: true + // example: virtualservice + ObjectType string `json:"objectType"` + + // Represents validity of the object: in case of warning, validity remains as true + // required: true + // example: false + Valid bool `json:"valid"` + + // Array of checks. It might be empty. + Checks []*IstioCheck `json:"checks"` +} + +// IstioCheck represents an individual check. 
+// swagger:model +type IstioCheck struct { + // Description of the check + // required: true + // example: Weight sum should be 100 + Message string `json:"message"` + + // Indicates the level of importance: error or warning + // required: true + // example: error + Severity SeverityLevel `json:"severity"` + + // String that describes where in the yaml file is the check located + // example: spec/http[0]/route + Path string `json:"path"` +} + +type SeverityLevel string + +const ( + ErrorSeverity SeverityLevel = "error" + WarningSeverity SeverityLevel = "warning" +) + +var ObjectTypeSingular = map[string]string{ + "gateways": "gateway", + "virtualservices": "virtualservice", + "destinationrules": "destinationrule", + "serviceentries": "serviceentry", + "rules": "rule", + "quotaspecs": "quotaspec", + "quotaspecbindings": "quotaspecbinding", +} + +var checkDescriptors = map[string]IstioCheck{ + "destinationrules.multimatch": { + Message: "More than one DestinationRules for the same host subset combination", + Severity: WarningSeverity, + }, + "destinationrules.nodest.matchingworkload": { + Message: "This host has no matching workloads", + Severity: ErrorSeverity, + }, + "destinationrules.nodest.subsetlabels": { + Message: "This subset's labels are not found in any matching host", + Severity: ErrorSeverity, + }, + "destinationrules.trafficpolicy.notlssettings": { + Message: "mTLS settings of a non-local Destination Rule are overridden", + Severity: WarningSeverity, + }, + "gateways.multimatch": { + Message: "More than one Gateway for the same host port combination", + Severity: WarningSeverity, + }, + "port.name.mismatch": { + Message: "Port name must follow [-suffix] form", + Severity: ErrorSeverity, + }, + "virtualservices.nogateway": { + Message: "VirtualService is pointing to a non-existent gateway", + Severity: ErrorSeverity, + }, + "virtualservices.nohost.hostnotfound": { + Message: "DestinationWeight on route doesn't have a valid service (host not found)", + Severity: ErrorSeverity, + }, + "virtualservices.nohost.invalidprotocol": { + Message: "VirtualService doesn't define any valid route protocol", + Severity: ErrorSeverity, + }, + "virtualservices.route.numericweight": { + Message: "Weight must be a number", + Severity: ErrorSeverity, + }, + "virtualservices.route.weightrange": { + Message: "Weight should be between 0 and 100", + Severity: ErrorSeverity, + }, + "virtualservices.route.weightsum": { + Message: "Weight sum should be 100", + Severity: ErrorSeverity, + }, + "virtualservices.route.allweightspresent": { + Message: "All routes should have weight", + Severity: WarningSeverity, + }, + "virtualservices.singlehost": { + Message: "More than one Virtual Service for same host", + Severity: WarningSeverity, + }, + "virtualservices.subsetpresent.destinationmandatory": { + Message: "Destination field is mandatory", + Severity: ErrorSeverity, + }, + "virtualservices.subsetpresent.subsetnotfound": { + Message: "Subset not found", + Severity: WarningSeverity, + }, +} + +func Build(checkId string, path string) IstioCheck { + check := checkDescriptors[checkId] + check.Path = path + return check +} + +func BuildKey(objectType, name string) IstioValidationKey { + return IstioValidationKey{ObjectType: objectType, Name: name} +} + +func CheckMessage(checkId string) string { + return checkDescriptors[checkId].Message +} + +func (iv IstioValidations) FilterByKey(objectType, name string) IstioValidations { + fiv := IstioValidations{} + for k, v := range iv { + if k.Name == name && k.ObjectType 
== objectType { + fiv[k] = v + } + } + + return fiv +} + +// FilterByTypes takes an input as ObjectTypes, transforms to singular types and filters the validations +func (iv IstioValidations) FilterByTypes(objectTypes []string) IstioValidations { + types := make(map[string]bool, len(objectTypes)) + for _, objectType := range objectTypes { + types[ObjectTypeSingular[objectType]] = true + } + fiv := IstioValidations{} + for k, v := range iv { + if _, found := types[k.ObjectType]; found { + fiv[k] = v + } + } + + return fiv +} + +func (iv IstioValidations) MergeValidations(validations IstioValidations) IstioValidations { + for key, validation := range validations { + v, ok := iv[key] + if !ok { + iv[key] = validation + } else { + v.Checks = append(v.Checks, validation.Checks...) + v.Valid = v.Valid && validation.Valid + } + } + return iv +} + +// MarshalJSON implements the json.Marshaler interface. +func (iv IstioValidations) MarshalJSON() ([]byte, error) { + out := make(map[string]map[string]*IstioValidation) + for k, v := range iv { + _, ok := out[k.ObjectType] + if !ok { + out[k.ObjectType] = make(map[string]*IstioValidation) + } + out[k.ObjectType][k.Name] = v + } + return json.Marshal(out) +} diff --git a/vendor/github.com/kiali/kiali/models/jaeger_info.go b/vendor/github.com/kiali/kiali/models/jaeger_info.go new file mode 100644 index 000000000..78b43d2de --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/jaeger_info.go @@ -0,0 +1,6 @@ +package models + +// JaegerInfo provides information to access Jaeger UI +type JaegerInfo struct { + URL string `json:"url"` +} diff --git a/vendor/github.com/kiali/kiali/models/mesh_policy.go b/vendor/github.com/kiali/kiali/models/mesh_policy.go new file mode 100644 index 000000000..e3bbdcd1a --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/mesh_policy.go @@ -0,0 +1,38 @@ +package models + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/kubernetes" +) + +type MeshPolicies []MeshPolicy +type MeshPolicy struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + Targets interface{} `json:"targets"` + Peers interface{} `json:"peers"` + PeerIsOptional interface{} `json:"peerIsOptional"` + Origins interface{} `json:"origins"` + OriginIsOptional interface{} `json:"originIsOptional"` + PrincipalBinding interface{} `json:"principalBinding"` + } `json:"spec"` +} + +func (mps *MeshPolicies) Parse(meshPolicies []kubernetes.IstioObject) { + for _, qs := range meshPolicies { + meshPolicy := MeshPolicy{} + meshPolicy.Parse(qs) + *mps = append(*mps, meshPolicy) + } +} + +func (mp *MeshPolicy) Parse(meshPolicy kubernetes.IstioObject) { + mp.Metadata = meshPolicy.GetObjectMeta() + mp.Spec.Targets = meshPolicy.GetSpec()["targets"] + mp.Spec.Peers = meshPolicy.GetSpec()["peers"] + mp.Spec.PeerIsOptional = meshPolicy.GetSpec()["peersIsOptional"] + mp.Spec.Origins = meshPolicy.GetSpec()["origins"] + mp.Spec.OriginIsOptional = meshPolicy.GetSpec()["originIsOptinal"] + mp.Spec.PrincipalBinding = meshPolicy.GetSpec()["principalBinding"] +} diff --git a/vendor/github.com/kiali/kiali/models/namespace.go b/vendor/github.com/kiali/kiali/models/namespace.go new file mode 100644 index 000000000..771311820 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/namespace.go @@ -0,0 +1,61 @@ +package models + +import ( + "k8s.io/api/core/v1" + "time" + + osv1 "github.com/openshift/api/project/v1" +) + +// A Namespace provide a scope for names +// This type is used to describe a set of objects. 
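A hedged usage sketch of the helpers above (object names are placeholders): MergeValidations appends checks and ANDs the Valid flags for matching keys, and the custom MarshalJSON groups output by object type, then object name.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/kiali/kiali/models"
)

func main() {
	key := models.BuildKey("virtualservice", "reviews")
	check := models.Build("virtualservices.route.weightsum", "spec/http[0]/route")

	base := models.IstioValidations{
		key: &models.IstioValidation{Name: "reviews", ObjectType: "virtualservice", Valid: true},
	}
	findings := models.IstioValidations{
		key: &models.IstioValidation{
			Name: "reviews", ObjectType: "virtualservice", Valid: false,
			Checks: []*models.IstioCheck{&check},
		},
	}

	merged := base.MergeValidations(findings) // check appended; Valid becomes true && false
	out, _ := json.Marshal(merged)            // {"virtualservice":{"reviews":{...}}}
	fmt.Println(string(out))
}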
+// +// swagger:model namespace +type Namespace struct { + // The id of the namespace. + // + // example: istio-system + // required: true + Name string `json:"name"` + + // Creation date of the namespace. + // There is no need to export this through the API. So, this is + // set to be ignored by JSON package. + // + // required: true + CreationTimestamp time.Time `json:"-"` +} + +func CastNamespaceCollection(ns []v1.Namespace) []Namespace { + namespaces := make([]Namespace, len(ns)) + for i, item := range ns { + namespaces[i] = CastNamespace(item) + } + + return namespaces +} + +func CastNamespace(ns v1.Namespace) Namespace { + namespace := Namespace{} + namespace.Name = ns.Name + namespace.CreationTimestamp = ns.CreationTimestamp.Time + + return namespace +} + +func CastProjectCollection(ps []osv1.Project) []Namespace { + namespaces := make([]Namespace, len(ps)) + for i, project := range ps { + namespaces[i] = CastProject(project) + } + + return namespaces +} + +func CastProject(p osv1.Project) Namespace { + namespace := Namespace{} + namespace.Name = p.Name + namespace.CreationTimestamp = p.CreationTimestamp.Time + + return namespace +} diff --git a/vendor/github.com/kiali/kiali/models/pod.go b/vendor/github.com/kiali/kiali/models/pod.go new file mode 100644 index 000000000..8524c2c44 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/pod.go @@ -0,0 +1,123 @@ +package models + +import ( + "encoding/json" + "strings" + + "k8s.io/api/core/v1" + + "github.com/kiali/kiali/config" +) + +// Pods alias for list of Pod structs +type Pods []*Pod + +// Pod holds a subset of v1.Pod data that is meaningful in Kiali +type Pod struct { + Name string `json:"name"` + Labels map[string]string `json:"labels"` + CreatedAt string `json:"createdAt"` + CreatedBy []Reference `json:"createdBy"` + IstioContainers []*ContainerInfo `json:"istioContainers"` + IstioInitContainers []*ContainerInfo `json:"istioInitContainers"` + Status string `json:"status"` + AppLabel bool `json:"appLabel"` + VersionLabel bool `json:"versionLabel"` + RuntimesAnnotation []string `json:"runtimesAnnotation"` +} + +// Reference holds some information on the pod creator +type Reference struct { + Name string `json:"name"` + Kind string `json:"kind"` +} + +// ContainerInfo holds container name and image +type ContainerInfo struct { + Name string `json:"name"` + Image string `json:"image"` +} + +// ParseDeployment extracts desired information from k8s []Pod info +func (pods *Pods) Parse(list []v1.Pod) { + if list == nil { + return + } + + for _, pod := range list { + casted := Pod{} + casted.Parse(&pod) + *pods = append(*pods, &casted) + } +} + +// Below types are used for unmarshalling json +type createdBy struct { + Reference Reference `json:"reference"` +} +type sideCarStatus struct { + Containers []string `json:"containers"` + InitContainers []string `json:"initContainers"` +} + +// ParseDeployment extracts desired information from k8s Pod info +func (pod *Pod) Parse(p *v1.Pod) { + pod.Name = p.Name + pod.Labels = p.Labels + pod.CreatedAt = formatTime(p.CreationTimestamp.Time) + for _, ref := range p.OwnerReferences { + pod.CreatedBy = append(pod.CreatedBy, Reference{ + Name: ref.Name, + Kind: ref.Kind, + }) + } + conf := config.Get() + // ParseDeployment some annotations + if jSon, ok := p.Annotations[conf.ExternalServices.Istio.IstioSidecarAnnotation]; ok { + var scs sideCarStatus + err := json.Unmarshal([]byte(jSon), &scs) + if err == nil { + for _, name := range scs.InitContainers { + container := ContainerInfo{ + Name: name, 
+ Image: lookupImage(name, p.Spec.InitContainers)} + pod.IstioInitContainers = append(pod.IstioInitContainers, &container) + } + for _, name := range scs.Containers { + container := ContainerInfo{ + Name: name, + Image: lookupImage(name, p.Spec.Containers)} + pod.IstioContainers = append(pod.IstioContainers, &container) + } + } + } + // Check for custom dashboards annotation + if rawRuntimes, ok := p.Annotations["kiali.io/runtimes"]; ok { + pod.RuntimesAnnotation = strings.Split(strings.TrimSpace(rawRuntimes), ",") + } + pod.Status = string(p.Status.Phase) + _, pod.AppLabel = p.Labels[conf.IstioLabels.AppLabelName] + _, pod.VersionLabel = p.Labels[conf.IstioLabels.VersionLabelName] +} + +func lookupImage(containerName string, containers []v1.Container) string { + for _, c := range containers { + if c.Name == containerName { + return c.Image + } + } + return "" +} + +func (pods Pods) HasIstioSideCar() bool { + for _, pod := range pods { + if pod.HasIstioSideCar() { + return true + } + } + return false +} + +func (pod Pod) HasIstioSideCar() bool { + return len(pod.IstioContainers) > 0 +} diff --git a/vendor/github.com/kiali/kiali/models/policy.go b/vendor/github.com/kiali/kiali/models/policy.go new file mode 100644 index 000000000..7b8a8f288 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/policy.go @@ -0,0 +1,38 @@ +package models + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/kubernetes" +) + +type Policies []Policy +type Policy struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + Targets interface{} `json:"targets"` + Peers interface{} `json:"peers"` + PeerIsOptional interface{} `json:"peerIsOptional"` + Origins interface{} `json:"origins"` + OriginIsOptional interface{} `json:"originIsOptional"` + PrincipalBinding interface{} `json:"principalBinding"` + } `json:"spec"` +} + +func (ps *Policies) Parse(policies []kubernetes.IstioObject) { + for _, qs := range policies { + policy := Policy{} + policy.Parse(qs) + *ps = append(*ps, policy) + } +} + +func (p *Policy) Parse(policy kubernetes.IstioObject) { + p.Metadata = policy.GetObjectMeta() + p.Spec.Targets = policy.GetSpec()["targets"] + p.Spec.Peers = policy.GetSpec()["peers"] + p.Spec.PeerIsOptional = policy.GetSpec()["peersIsOptional"] + p.Spec.Origins = policy.GetSpec()["origins"] + p.Spec.OriginIsOptional = policy.GetSpec()["originIsOptinal"] + p.Spec.PrincipalBinding = policy.GetSpec()["principalBinding"] +} diff --git a/vendor/github.com/kiali/kiali/models/port.go b/vendor/github.com/kiali/kiali/models/port.go new file mode 100644 index 000000000..b0e111cd4 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/port.go @@ -0,0 +1,38 @@ +package models + +import "k8s.io/api/core/v1" + +type Ports []Port +type Port struct { + Name string `json:"name"` + Protocol string `json:"protocol"` + Port int32 `json:"port"` +} + +func (ports *Ports) Parse(ps []v1.ServicePort) { + for _, servicePort := range ps { + port := Port{} + port.Parse(servicePort) + *ports = append(*ports, port) + } +} + +func (port *Port) Parse(p v1.ServicePort) { + port.Name = p.Name + port.Protocol = string(p.Protocol) + port.Port = p.Port +} + +func (ports *Ports) ParseEndpointPorts(ps []v1.EndpointPort) { + for _, endpointPort := range ps { + port := Port{} + port.ParseEndpointPort(endpointPort) + *ports = append(*ports, port) + } +} + +func (port *Port) ParseEndpointPort(p v1.EndpointPort) { + port.Name = p.Name + port.Protocol = string(p.Protocol) + port.Port = p.Port +} diff --git 
a/vendor/github.com/kiali/kiali/models/quota_spec.go b/vendor/github.com/kiali/kiali/models/quota_spec.go new file mode 100644 index 000000000..4594cd5f4 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/quota_spec.go @@ -0,0 +1,28 @@ +package models + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/kubernetes" +) + +type QuotaSpecs []QuotaSpec +type QuotaSpec struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + Rules interface{} `json:"rules"` + } `json:"spec"` +} + +func (qss *QuotaSpecs) Parse(quotaSpecs []kubernetes.IstioObject) { + for _, qs := range quotaSpecs { + quotaSpec := QuotaSpec{} + quotaSpec.Parse(qs) + *qss = append(*qss, quotaSpec) + } +} + +func (qs *QuotaSpec) Parse(quotaSpec kubernetes.IstioObject) { + qs.Metadata = quotaSpec.GetObjectMeta() + qs.Spec.Rules = quotaSpec.GetSpec()["rules"] +} diff --git a/vendor/github.com/kiali/kiali/models/quota_spec_binding.go b/vendor/github.com/kiali/kiali/models/quota_spec_binding.go new file mode 100644 index 000000000..dd666299e --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/quota_spec_binding.go @@ -0,0 +1,30 @@ +package models + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/kubernetes" +) + +type QuotaSpecBindings []QuotaSpecBinding +type QuotaSpecBinding struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + QuotaSpecs interface{} `json:"quotaSpecs"` + Services interface{} `json:"services"` + } `json:"spec"` +} + +func (qsbs *QuotaSpecBindings) Parse(quotaSpecBindings []kubernetes.IstioObject) { + for _, qsb := range quotaSpecBindings { + quotaSpecBinding := QuotaSpecBinding{} + quotaSpecBinding.Parse(qsb) + *qsbs = append(*qsbs, quotaSpecBinding) + } +} + +func (qsb *QuotaSpecBinding) Parse(quotaSpecBinding kubernetes.IstioObject) { + qsb.Metadata = quotaSpecBinding.GetObjectMeta() + qsb.Spec.QuotaSpecs = quotaSpecBinding.GetSpec()["quotaSpecs"] + qsb.Spec.Services = quotaSpecBinding.GetSpec()["services"] +} diff --git a/vendor/github.com/kiali/kiali/models/service.go b/vendor/github.com/kiali/kiali/models/service.go new file mode 100644 index 000000000..cb362509a --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/service.go @@ -0,0 +1,125 @@ +package models + +import ( + v1 "k8s.io/api/core/v1" + + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/prometheus" +) + +type ServiceOverview struct { + // Name of the Service + // required: true + // example: reviews-v1 + Name string `json:"name"` + // Define if Pods related to this Service has an IstioSidecar deployed + // required: true + // example: true + IstioSidecar bool `json:"istioSidecar"` + // Has label app + // required: true + // example: true + AppLabel bool `json:"appLabel"` +} + +type ServiceList struct { + Namespace Namespace `json:"namespace"` + Services []ServiceOverview `json:"services"` +} + +type ServiceDetails struct { + Service Service `json:"service"` + IstioSidecar bool `json:"istioSidecar"` + Endpoints Endpoints `json:"endpoints"` + VirtualServices VirtualServices `json:"virtualServices"` + DestinationRules DestinationRules `json:"destinationRules"` + Dependencies map[string][]SourceWorkload `json:"dependencies"` + Workloads WorkloadOverviews `json:"workloads"` + Health ServiceHealth `json:"health"` + Validations IstioValidations `json:"validations"` + ErrorTraces int `json:"errorTraces"` +} + +type Services []*Service +type Service struct { + Name string `json:"name"` + CreatedAt string 
`json:"createdAt"` + ResourceVersion string `json:"resourceVersion"` + Namespace Namespace `json:"namespace"` + Labels map[string]string `json:"labels"` + Type string `json:"type"` + Ip string `json:"ip"` + Ports Ports `json:"ports"` +} + +// SourceWorkload holds workload identifiers used for service dependencies +type SourceWorkload struct { + Name string `json:"name"` + Namespace string `json:"namespace"` +} + +func (ss *Services) Parse(services []v1.Service) { + if ss == nil { + return + } + + for _, item := range services { + service := &Service{} + service.Parse(&item) + *ss = append(*ss, service) + } +} + +func (s *Service) Parse(service *v1.Service) { + if service != nil { + s.Name = service.Name + s.Namespace = Namespace{Name: service.Namespace} + s.Labels = service.Labels + s.Type = string(service.Spec.Type) + s.Ip = service.Spec.ClusterIP + s.CreatedAt = formatTime(service.CreationTimestamp.Time) + s.ResourceVersion = service.ResourceVersion + (&s.Ports).Parse(service.Spec.Ports) + } +} + +func (s *ServiceDetails) SetService(svc *v1.Service) { + s.Service.Parse(svc) +} + +func (s *ServiceDetails) SetEndpoints(eps *v1.Endpoints) { + (&s.Endpoints).Parse(eps) +} + +func (s *ServiceDetails) SetPods(pods []v1.Pod) { + mPods := Pods{} + mPods.Parse(pods) + s.IstioSidecar = mPods.HasIstioSideCar() +} + +func (s *ServiceDetails) SetVirtualServices(vs []kubernetes.IstioObject, canCreate, canUpdate, canDelete bool) { + s.VirtualServices.Permissions = ResourcePermissions{Create: canCreate, Update: canUpdate, Delete: canDelete} + (&s.VirtualServices).Parse(vs) +} + +func (s *ServiceDetails) SetDestinationRules(dr []kubernetes.IstioObject, canCreate, canUpdate, canDelete bool) { + s.DestinationRules.Permissions = ResourcePermissions{Create: canCreate, Update: canUpdate, Delete: canDelete} + (&s.DestinationRules).Parse(dr) +} + +func (s *ServiceDetails) SetErrorTraces(errorTraces int) { + s.ErrorTraces = errorTraces +} + +func (s *ServiceDetails) SetSourceWorkloads(sw map[string][]prometheus.Workload) { + // Transform dependencies for UI + s.Dependencies = make(map[string][]SourceWorkload) + for version, workloads := range sw { + for _, workload := range workloads { + s.Dependencies[version] = append(s.Dependencies[version], SourceWorkload{ + Name: workload.Workload, + Namespace: workload.Namespace, + }) + } + } +} diff --git a/vendor/github.com/kiali/kiali/models/service_entry.go b/vendor/github.com/kiali/kiali/models/service_entry.go new file mode 100644 index 000000000..15a423682 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/service_entry.go @@ -0,0 +1,38 @@ +package models + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/kubernetes" +) + +type ServiceEntries []ServiceEntry +type ServiceEntry struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + Hosts interface{} `json:"hosts"` + Addresses interface{} `json:"addresses"` + Ports interface{} `json:"ports"` + Location interface{} `json:"location"` + Resolution interface{} `json:"resolution"` + Endpoints interface{} `json:"endpoints"` + } `json:"spec"` +} + +func (ses *ServiceEntries) Parse(serviceEntries []kubernetes.IstioObject) { + for _, se := range serviceEntries { + serviceEntry := ServiceEntry{} + serviceEntry.Parse(se) + *ses = append(*ses, serviceEntry) + } +} + +func (se *ServiceEntry) Parse(serviceEntry kubernetes.IstioObject) { + se.Metadata = serviceEntry.GetObjectMeta() + se.Spec.Hosts = serviceEntry.GetSpec()["hosts"] + se.Spec.Addresses = 
serviceEntry.GetSpec()["addresses"] + se.Spec.Ports = serviceEntry.GetSpec()["ports"] + se.Spec.Location = serviceEntry.GetSpec()["location"] + se.Spec.Resolution = serviceEntry.GetSpec()["resolution"] + se.Spec.Endpoints = serviceEntry.GetSpec()["endpoints"] +} diff --git a/vendor/github.com/kiali/kiali/models/service_role.go b/vendor/github.com/kiali/kiali/models/service_role.go new file mode 100644 index 000000000..a898df98c --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/service_role.go @@ -0,0 +1,28 @@ +package models + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/kubernetes" +) + +type ServiceRoles []ServiceRole +type ServiceRole struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + Rules interface{} `json:"rules"` + } `json:"spec"` +} + +func (srs *ServiceRoles) Parse(serviceRoles []kubernetes.IstioObject) { + for _, sr := range serviceRoles { + serviceRole := ServiceRole{} + serviceRole.Parse(sr) + *srs = append(*srs, serviceRole) + } +} + +func (sr *ServiceRole) Parse(serviceRole kubernetes.IstioObject) { + sr.Metadata = serviceRole.GetObjectMeta() + sr.Spec.Rules = serviceRole.GetSpec()["rules"] +} diff --git a/vendor/github.com/kiali/kiali/models/service_role_binding.go b/vendor/github.com/kiali/kiali/models/service_role_binding.go new file mode 100644 index 000000000..4364eb380 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/service_role_binding.go @@ -0,0 +1,30 @@ +package models + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/kubernetes" +) + +type ServiceRoleBindings []ServiceRoleBinding +type ServiceRoleBinding struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + Subjects interface{} `json:"subjects"` + RoleRef interface{} `json:"roleRef"` + } `json:"spec"` +} + +func (srbs *ServiceRoleBindings) Parse(serviceRoleBindings []kubernetes.IstioObject) { + for _, srb := range serviceRoleBindings { + serviceRoleBinding := ServiceRoleBinding{} + serviceRoleBinding.Parse(srb) + *srbs = append(*srbs, serviceRoleBinding) + } +} + +func (srb *ServiceRoleBinding) Parse(serviceRoleBinding kubernetes.IstioObject) { + srb.Metadata = serviceRoleBinding.GetObjectMeta() + srb.Spec.Subjects = serviceRoleBinding.GetSpec()["subjects"] + srb.Spec.RoleRef = serviceRoleBinding.GetSpec()["roleRef"] +} diff --git a/vendor/github.com/kiali/kiali/models/util.go b/vendor/github.com/kiali/kiali/models/util.go new file mode 100644 index 000000000..a7cc5b9d1 --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/util.go @@ -0,0 +1,7 @@ +package models + +import "time" + +func formatTime(t time.Time) string { + return t.Format(time.RFC3339) +} diff --git a/vendor/github.com/kiali/kiali/models/virtual_service.go b/vendor/github.com/kiali/kiali/models/virtual_service.go new file mode 100644 index 000000000..372119dbc --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/virtual_service.go @@ -0,0 +1,68 @@ +package models + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kiali/kiali/kubernetes" +) + +// VirtualServices virtualServices +// +// This type is used for returning an array of VirtualServices with some permission flags +// +// swagger:model virtualServices +// An array of virtualService +// swagger:allOf +type VirtualServices struct { + Permissions ResourcePermissions `json:"permissions"` + Items []VirtualService `json:"items"` +} + +// VirtualService virtualService +// +// This type is used for returning a VirtualService +// 
+// swagger:model virtualService +type VirtualService struct { + Metadata meta_v1.ObjectMeta `json:"metadata"` + Spec struct { + Hosts interface{} `json:"hosts"` + Gateways interface{} `json:"gateways"` + Http interface{} `json:"http"` + Tcp interface{} `json:"tcp"` + Tls interface{} `json:"tls"` + } `json:"spec"` +} + +func (vServices *VirtualServices) Parse(virtualServices []kubernetes.IstioObject) { + vServices.Items = []VirtualService{} + for _, vs := range virtualServices { + virtualService := VirtualService{} + virtualService.Parse(vs) + vServices.Items = append(vServices.Items, virtualService) + } +} + +func (vService *VirtualService) Parse(virtualService kubernetes.IstioObject) { + vService.Metadata = virtualService.GetObjectMeta() + vService.Spec.Hosts = virtualService.GetSpec()["hosts"] + vService.Spec.Gateways = virtualService.GetSpec()["gateways"] + vService.Spec.Http = virtualService.GetSpec()["http"] + vService.Spec.Tcp = virtualService.GetSpec()["tcp"] + vService.Spec.Tls = virtualService.GetSpec()["tls"] +} + +// IsValidHost returns true if VirtualService hosts applies to the service +func (vService *VirtualService) IsValidHost(namespace string, serviceName string) bool { + if serviceName == "" { + return false + } + if hosts, ok := vService.Spec.Hosts.([]interface{}); ok { + for _, host := range hosts { + if kubernetes.FilterByHost(host.(string), serviceName, namespace) { + return true + } + } + } + return false +} diff --git a/vendor/github.com/kiali/kiali/models/workload.go b/vendor/github.com/kiali/kiali/models/workload.go new file mode 100644 index 000000000..061f3816c --- /dev/null +++ b/vendor/github.com/kiali/kiali/models/workload.go @@ -0,0 +1,373 @@ +package models + +import ( + osappsv1 "github.com/openshift/api/apps/v1" + "k8s.io/api/apps/v1beta1" + "k8s.io/api/apps/v1beta2" + batch_v1 "k8s.io/api/batch/v1" + batch_v1beta1 "k8s.io/api/batch/v1beta1" + "k8s.io/api/core/v1" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/prometheus" +) + +type WorkloadList struct { + // Namespace where the workloads live in + // required: true + // example: bookinfo + Namespace Namespace `json:"namespace"` + + // Workloads for a given namespace + // required: true + Workloads []WorkloadListItem `json:"workloads"` +} + +// WorkloadListItem has the necessary information to display the console workload list +type WorkloadListItem struct { + // Name of the workload + // required: true + // example: reviews-v1 + Name string `json:"name"` + + // Type of the workload + // required: true + // example: deployment + Type string `json:"type"` + + // Creation timestamp (in RFC3339 format) + // required: true + // example: 2018-07-31T12:24:17Z + CreatedAt string `json:"createdAt"` + + // Kubernetes ResourceVersion + // required: true + // example: 192892127 + ResourceVersion string `json:"resourceVersion"` + + // Define if Pods related to this Workload has an IstioSidecar deployed + // required: true + // example: true + IstioSidecar bool `json:"istioSidecar"` + + // Workload labels + Labels map[string]string `json:"labels"` + + // Define if Pods related to this Workload has the label App + // required: true + // example: true + AppLabel bool `json:"appLabel"` + + // Define if Pods related to this Workload has the label Version + // required: true + // example: true + VersionLabel bool `json:"versionLabel"` + + // Number of current workload pods + // required: true + // example: 1 + PodCount int `json:"podCount"` +} + +type WorkloadOverviews []*WorkloadListItem + +// Workload has 
the details of a workload +type Workload struct { + WorkloadListItem + + // Number of desired replicas + // required: true + // example: 2 + Replicas int32 `json:"replicas"` + + // Number of available replicas + // required: true + // example: 1 + AvailableReplicas int32 `json:"availableReplicas"` + + // Number of unavailable replicas + // required: true + // example: 1 + UnavailableReplicas int32 `json:"unavailableReplicas"` + + // Pods bound to the workload + Pods Pods `json:"pods"` + + // Services that match workload selector + Services Services `json:"services"` + + DestinationServices []DestinationService `json:"destinationServices"` + + // Runtimes and associated dashboards + Runtimes []Runtime `json:"runtimes"` +} + +// DestinationService holds service identifiers used for workload dependencies +type DestinationService struct { + Name string `json:"name"` + Namespace string `json:"namespace"` +} + +type Workloads []*Workload + +func (workload *WorkloadListItem) ParseWorkload(w *Workload) { + conf := config.Get() + workload.Name = w.Name + workload.Type = w.Type + workload.CreatedAt = w.CreatedAt + workload.ResourceVersion = w.ResourceVersion + workload.IstioSidecar = w.Pods.HasIstioSideCar() + workload.Labels = w.Labels + workload.PodCount = len(w.Pods) + + /** Check the labels app and version required by Istio in template Pods*/ + _, workload.AppLabel = w.Labels[conf.IstioLabels.AppLabelName] + _, workload.VersionLabel = w.Labels[conf.IstioLabels.VersionLabelName] +} + +func (workload *Workload) ParseDeployment(d *v1beta1.Deployment) { + conf := config.Get() + workload.Name = d.Name + workload.Type = "Deployment" + workload.Labels = d.Spec.Template.Labels + + /** Check the labels app and version required by Istio in template Pods*/ + _, workload.AppLabel = workload.Labels[conf.IstioLabels.AppLabelName] + _, workload.VersionLabel = workload.Labels[conf.IstioLabels.VersionLabelName] + + workload.CreatedAt = formatTime(d.CreationTimestamp.Time) + workload.ResourceVersion = d.ResourceVersion + workload.Replicas = d.Status.Replicas + workload.AvailableReplicas = d.Status.AvailableReplicas + // Deployments/ReplicaSets have a different parameters to indicate unavailable + // calculate "desired" - "current" sounds reasonable on this context + workload.UnavailableReplicas = workload.Replicas - workload.AvailableReplicas +} + +func (workload *Workload) ParseReplicaSet(r *v1beta2.ReplicaSet) { + conf := config.Get() + workload.Name = r.Name + workload.Type = "ReplicaSet" + workload.Labels = r.Spec.Template.Labels + + /** Check the labels app and version required by Istio in template Pods*/ + _, workload.AppLabel = workload.Labels[conf.IstioLabels.AppLabelName] + _, workload.VersionLabel = workload.Labels[conf.IstioLabels.VersionLabelName] + + workload.CreatedAt = formatTime(r.CreationTimestamp.Time) + workload.ResourceVersion = r.ResourceVersion + workload.Replicas = r.Status.Replicas + workload.AvailableReplicas = r.Status.AvailableReplicas + // Deployments/ReplicaSets have a different parameters to indicate unavailable + // calculate "desired" - "current" sounds reasonable on this context + workload.UnavailableReplicas = workload.Replicas - workload.AvailableReplicas +} + +func (workload *Workload) ParseReplicationController(r *v1.ReplicationController) { + conf := config.Get() + workload.Name = r.Name + workload.Type = "ReplicationController" + workload.Labels = r.Spec.Template.Labels + + /** Check the labels app and version required by Istio in template Pods*/ + _, workload.AppLabel = 
workload.Labels[conf.IstioLabels.AppLabelName] + _, workload.VersionLabel = workload.Labels[conf.IstioLabels.VersionLabelName] + + workload.CreatedAt = formatTime(r.CreationTimestamp.Time) + workload.ResourceVersion = r.ResourceVersion + workload.Replicas = r.Status.Replicas + workload.AvailableReplicas = r.Status.AvailableReplicas + // Deployments/ReplicaSets have a different parameters to indicate unavailable + // calculate "desired" - "current" sounds reasonable on this context + workload.UnavailableReplicas = workload.Replicas - workload.AvailableReplicas +} + +func (workload *Workload) ParseDeploymentConfig(dc *osappsv1.DeploymentConfig) { + workload.Name = dc.Name + workload.Type = "DeploymentConfig" + workload.Labels = dc.Spec.Template.Labels + workload.CreatedAt = formatTime(dc.CreationTimestamp.Time) + workload.ResourceVersion = dc.ResourceVersion + workload.Replicas = dc.Status.Replicas + workload.AvailableReplicas = dc.Status.AvailableReplicas + // Deployments/ReplicaSets have a different parameters to indicate unavailable + // calculate "desired" - "current" sounds reasonable on this context + workload.UnavailableReplicas = workload.Replicas - workload.AvailableReplicas +} + +func (workload *Workload) ParseStatefulSet(s *v1beta2.StatefulSet) { + conf := config.Get() + workload.Name = s.Name + workload.Type = "StatefulSet" + workload.Labels = s.Spec.Template.Labels + + /** Check the labels app and version required by Istio in template Pods*/ + _, workload.AppLabel = workload.Labels[conf.IstioLabels.AppLabelName] + _, workload.VersionLabel = workload.Labels[conf.IstioLabels.VersionLabelName] + + workload.CreatedAt = formatTime(s.CreationTimestamp.Time) + workload.ResourceVersion = s.ResourceVersion + workload.Replicas = s.Status.Replicas + workload.AvailableReplicas = s.Status.ReadyReplicas + // Deployments/ReplicaSets have a different parameters to indicate unavailable + // calculate "desired" - "current" sounds reasonable on this context + workload.UnavailableReplicas = workload.Replicas - workload.AvailableReplicas +} + +func (workload *Workload) ParsePod(pod *v1.Pod) { + conf := config.Get() + workload.Name = pod.Name + workload.Type = "Pod" + workload.Labels = pod.Labels + + /** Check the labels app and version required by Istio in template Pods*/ + _, workload.AppLabel = workload.Labels[conf.IstioLabels.AppLabelName] + _, workload.VersionLabel = workload.Labels[conf.IstioLabels.VersionLabelName] + + workload.CreatedAt = formatTime(pod.CreationTimestamp.Time) + workload.ResourceVersion = pod.ResourceVersion + + var podReplicas, podAvailableReplicas int32 + podReplicas = 1 + podAvailableReplicas = 1 + + // When a Workload is a single pod we don't have access to any controller replicas + // On this case we differentiate when pod is terminated with success versus not running + // Probably it might be more cases to refine here + if pod.Status.Phase == "Succeed" { + podReplicas = 0 + podAvailableReplicas = 0 + } else if pod.Status.Phase != "Running" { + podAvailableReplicas = 0 + } + + workload.Replicas = podReplicas + workload.AvailableReplicas = podAvailableReplicas + // Deployments/ReplicaSets have a different parameters to indicate unavailable + // calculate "desired" - "current" sounds reasonable on this context + workload.UnavailableReplicas = workload.Replicas - workload.AvailableReplicas +} + +func (workload *Workload) ParseJob(job *batch_v1.Job) { + conf := config.Get() + workload.Name = job.Name + workload.Type = "Job" + workload.Labels = job.Labels + + /** Check the 
labels app and version required by Istio in template Pods*/ + _, workload.AppLabel = workload.Labels[conf.IstioLabels.AppLabelName] + _, workload.VersionLabel = workload.Labels[conf.IstioLabels.VersionLabelName] + + workload.CreatedAt = formatTime(job.CreationTimestamp.Time) + workload.ResourceVersion = job.ResourceVersion + + workload.Replicas = job.Status.Active + job.Status.Succeeded + job.Status.Failed + workload.AvailableReplicas = job.Status.Active + job.Status.Succeeded + + // Deployments/ReplicaSets have a different parameters to indicate unavailable + // calculate "desired" - "current" sounds reasonable on this context + workload.UnavailableReplicas = job.Status.Failed +} + +func (workload *Workload) ParseCronJob(cnjb *batch_v1beta1.CronJob) { + conf := config.Get() + workload.Name = cnjb.Name + workload.Type = "CronJob" + workload.Labels = cnjb.Labels + + /** Check the labels app and version required by Istio in template Pods*/ + _, workload.AppLabel = workload.Labels[conf.IstioLabels.AppLabelName] + _, workload.VersionLabel = workload.Labels[conf.IstioLabels.VersionLabelName] + + workload.CreatedAt = formatTime(cnjb.CreationTimestamp.Time) + workload.ResourceVersion = cnjb.ResourceVersion + + // We don't have the information of this controller + // We will infer the number of replicas as the number of pods without succeed state + // We will infer the number of available as the number of pods with running state + // If this is not enough, we should try to fetch the controller, it is not doing now to not overload kiali fetching all types of controllers + var podReplicas, podAvailableReplicas int32 + podReplicas = 0 + podAvailableReplicas = 0 + for _, pod := range workload.Pods { + if pod.Status != "Succeeded" { + podReplicas++ + } + if pod.Status == "Running" { + podAvailableReplicas++ + } + } + workload.Replicas = podReplicas + workload.AvailableReplicas = podAvailableReplicas + // Deployments/ReplicaSets have a different parameters to indicate unavailable + // calculate "desired" - "current" sounds reasonable on this context + if podReplicas > podAvailableReplicas { + workload.UnavailableReplicas = workload.Replicas - workload.AvailableReplicas + } else { + // On this case a Job may have all pods terminated + // Then it is not an unhealth condition + workload.UnavailableReplicas = 0 + } +} + +func (workload *Workload) ParsePods(controllerName string, controllerType string, pods []v1.Pod) { + conf := config.Get() + workload.Name = controllerName + workload.Type = controllerType + // We don't have the information of this controller + // We will infer the number of replicas as the number of pods without succeed state + // We will infer the number of available as the number of pods with running state + // If this is not enough, we should try to fetch the controller, it is not doing now to not overload kiali fetching all types of controllers + var podReplicas, podAvailableReplicas int32 + podReplicas = 0 + podAvailableReplicas = 0 + for _, pod := range pods { + if pod.Status.Phase != "Succeeded" { + podReplicas++ + } + if pod.Status.Phase == "Running" { + podAvailableReplicas++ + } + } + workload.Replicas = podReplicas + workload.AvailableReplicas = podAvailableReplicas + // Deployments/ReplicaSets have a different parameters to indicate unavailable + // calculate "desired" - "current" sounds reasonable on this context + if podReplicas > podAvailableReplicas { + workload.UnavailableReplicas = workload.Replicas - workload.AvailableReplicas + } else { + // On this case a Job may have 
all pods terminated + // Then it is not an unhealth condition + workload.UnavailableReplicas = 0 + } + // We fetch one pod as template for labels + // There could be corner cases not correct, then we should support more controllers + if len(pods) > 0 { + workload.Labels = pods[0].Labels + workload.CreatedAt = formatTime(pods[0].CreationTimestamp.Time) + workload.ResourceVersion = pods[0].ResourceVersion + } + + /** Check the labels app and version required by Istio in template Pods*/ + _, workload.AppLabel = workload.Labels[conf.IstioLabels.AppLabelName] + _, workload.VersionLabel = workload.Labels[conf.IstioLabels.VersionLabelName] +} + +func (workload *Workload) SetPods(pods []v1.Pod) { + workload.Pods.Parse(pods) + workload.IstioSidecar = workload.Pods.HasIstioSideCar() +} + +func (workload *Workload) SetServices(svcs []v1.Service) { + workload.Services.Parse(svcs) +} + +func (workload *Workload) SetDestinationServices(dss []prometheus.Service) { + workload.DestinationServices = make([]DestinationService, 0, len(dss)) + for _, service := range dss { + workload.DestinationServices = append(workload.DestinationServices, DestinationService{ + Name: service.ServiceName, + Namespace: service.Namespace, + }) + } +} diff --git a/vendor/github.com/kiali/kiali/prometheus/client.go b/vendor/github.com/kiali/kiali/prometheus/client.go new file mode 100644 index 000000000..22c19f463 --- /dev/null +++ b/vendor/github.com/kiali/kiali/prometheus/client.go @@ -0,0 +1,253 @@ +package prometheus + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/prometheus/client_golang/api" + "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/model" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/prometheus/internalmetrics" + "github.com/kiali/kiali/util" +) + +// ClientInterface for mocks (only mocked function are necessary here) +type ClientInterface interface { + FetchHistogramRange(metricName, labels, grouping string, q *BaseMetricsQuery) Histogram + FetchRange(metricName, labels, grouping, aggregator string, q *BaseMetricsQuery) *Metric + FetchRateRange(metricName, labels, grouping string, q *BaseMetricsQuery) *Metric + GetAllRequestRates(namespace, ratesInterval string, queryTime time.Time) (model.Vector, error) + GetAppRequestRates(namespace, app, ratesInterval string, queryTime time.Time) (model.Vector, model.Vector, error) + GetConfiguration() (v1.ConfigResult, error) + GetDestinationServices(namespace string, namespaceCreationTime time.Time, workloadname string) ([]Service, error) + GetFlags() (v1.FlagsResult, error) + GetMetrics(query *IstioMetricsQuery) Metrics + GetNamespaceServicesRequestRates(namespace, ratesInterval string, queryTime time.Time) (model.Vector, error) + GetServiceRequestRates(namespace, service, ratesInterval string, queryTime time.Time) (model.Vector, error) + GetSourceWorkloads(namespace string, namespaceCreationTime time.Time, servicename string) (map[string][]Workload, error) + GetWorkloadRequestRates(namespace, workload, ratesInterval string, queryTime time.Time) (model.Vector, model.Vector, error) +} + +// Client for Prometheus API. +// It hides the way we query Prometheus offering a layer with a high level defined API. 
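An assumed wiring sketch (function and variable names are hypothetical) showing how a caller might combine the two packages declared above: destination services reported by Prometheus are attached to a models.Workload via SetDestinationServices.

package example

import (
	"time"

	"github.com/kiali/kiali/models"
	"github.com/kiali/kiali/prometheus"
)

// enrichWorkload is illustrative only; any prometheus.ClientInterface implementation (real or mock) works.
func enrichWorkload(promClient prometheus.ClientInterface, w *models.Workload,
	namespace string, nsCreated time.Time) error {
	dss, err := promClient.GetDestinationServices(namespace, nsCreated, w.Name)
	if err != nil {
		return err
	}
	w.SetDestinationServices(dss) // copied into w.DestinationServices as name/namespace pairs
	return nil
}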
+type Client struct { + ClientInterface + p8s api.Client + api v1.API +} + +// Workload describes a workload with contextual information +type Workload struct { + Namespace string + App string + Workload string + Version string +} + +// Service describes a service with contextual information +type Service struct { + Namespace string + App string + ServiceName string +} + +// NewClient creates a new client to the Prometheus API. +// It returns an error on any problem. +func NewClient() (*Client, error) { + if config.Get() == nil { + return nil, errors.New("config.Get() must be not null") + } + p8s, err := api.NewClient(api.Config{Address: config.Get().ExternalServices.PrometheusServiceURL}) + if err != nil { + return nil, err + } + client := Client{p8s: p8s, api: v1.NewAPI(p8s)} + return &client, nil +} + +// Inject allows for replacing the API with a mock For testing +func (in *Client) Inject(api v1.API) { + in.api = api +} + +// GetSourceWorkloads returns a map of list of source workloads for a given service +// identified by its namespace and service name. +// Returned map has a destination version as a key and a list of workloads as values. +// It returns an error on any problem. +func (in *Client) GetSourceWorkloads(namespace string, namespaceCreationTime time.Time, servicename string) (map[string][]Workload, error) { + reporter := "source" + if config.Get().IstioNamespace == namespace { + reporter = "destination" + } + // The query needs a lower bound to make sure that no outdated data is fetched + // So, a range is set and an "easy" function (delta) is applied to return an instant-vector, + // since only labels are needed. + queryTime := util.Clock.Now() + queryInterval := queryTime.Sub(namespaceCreationTime) + query := fmt.Sprintf("delta(istio_requests_total{reporter=\"%s\",destination_service_name=\"%s\",destination_service_namespace=\"%s\"}[%vs])", + reporter, servicename, namespace, int(queryInterval.Seconds())) + log.Debugf("GetSourceWorkloads query: %s", query) + promtimer := internalmetrics.GetPrometheusProcessingTimePrometheusTimer("GetSourceWorkloads") + result, err := in.api.Query(context.Background(), query, queryTime) + if err != nil { + return nil, err + } + promtimer.ObserveDuration() // notice we only collect metrics for successful prom queries + routes := make(map[string][]Workload) + switch result.Type() { + case model.ValVector: + matrix := result.(model.Vector) + for _, sample := range matrix { + metric := sample.Metric + index := string(metric["destination_version"]) + source := Workload{ + Namespace: string(metric["source_workload_namespace"]), + App: string(metric["source_app"]), + Workload: string(metric["source_workload"]), + Version: string(metric["source_version"]), + } + if arr, ok := routes[index]; ok { + found := false + for _, s := range arr { + if s.Workload == source.Workload { + found = true + break + } + } + if !found { + routes[index] = append(arr, source) + } + } else { + routes[index] = []Workload{source} + } + } + } + return routes, nil +} + +func (in *Client) GetDestinationServices(namespace string, namespaceCreationTime time.Time, workloadname string) ([]Service, error) { + reporter := "source" + if config.Get().IstioNamespace == namespace { + reporter = "destination" + } + + queryTime := util.Clock.Now() + queryInterval := queryTime.Sub(namespaceCreationTime) + groupBy := "(destination_service_namespace, destination_service_name, destination_service)" + query := 
fmt.Sprintf("sum(rate(istio_requests_total{reporter=\"%s\",source_workload=\"%s\",source_workload_namespace=\"%s\"}[%vs])) by %s", + reporter, workloadname, namespace, int(queryInterval.Seconds()), groupBy) + log.Debugf("GetDestinationServices query: %s", query) + promtimer := internalmetrics.GetPrometheusProcessingTimePrometheusTimer("GetDestinationServices") + result, err := in.api.Query(context.Background(), query, queryTime) + if err != nil { + return nil, err + } + promtimer.ObserveDuration() // notice we only collect metrics for successful prom queries + + routes := make([]Service, 0) + switch result.Type() { + case model.ValVector: + matrix := result.(model.Vector) + for _, sample := range matrix { + metric := sample.Metric + destination := Service{ + App: string(metric["destination_app"]), + ServiceName: string(metric["destination_service_name"]), + Namespace: string(metric["destination_service_namespace"]), + } + routes = append(routes, destination) + } + } + return routes, nil +} + +// GetMetrics returns the Metrics related to the provided query options. +func (in *Client) GetMetrics(query *IstioMetricsQuery) Metrics { + return getMetrics(in.api, query) +} + +// GetAllRequestRates queries Prometheus to fetch request counter rates, over a time interval, for requests +// into, internal to, or out of the namespace. +// Returns (rates, error) +func (in *Client) GetAllRequestRates(namespace string, ratesInterval string, queryTime time.Time) (model.Vector, error) { + return getAllRequestRates(in.api, namespace, queryTime, ratesInterval) +} + +// GetNamespaceServicesRequestRates queries Prometheus to fetch request counter rates, over a time interval, limited to +// requests for services in the namespace. +// Returns (rates, error) +func (in *Client) GetNamespaceServicesRequestRates(namespace string, ratesInterval string, queryTime time.Time) (model.Vector, error) { + return getNamespaceServicesRequestRates(in.api, namespace, queryTime, ratesInterval) +} + +// GetServiceRequestRates queries Prometheus to fetch request counters rates over a time interval +// for a given service (hence only inbound). +// Returns (in, error) +func (in *Client) GetServiceRequestRates(namespace, service, ratesInterval string, queryTime time.Time) (model.Vector, error) { + return getServiceRequestRates(in.api, namespace, service, queryTime, ratesInterval) +} + +// GetAppRequestRates queries Prometheus to fetch request counters rates over a time interval +// for a given app, both in and out. +// Returns (in, out, error) +func (in *Client) GetAppRequestRates(namespace, app, ratesInterval string, queryTime time.Time) (model.Vector, model.Vector, error) { + return getItemRequestRates(in.api, namespace, app, "app", queryTime, ratesInterval) +} + +// GetWorkloadRequestRates queries Prometheus to fetch request counters rates over a time interval +// for a given workload, both in and out. 
+// Returns (in, out, error) +func (in *Client) GetWorkloadRequestRates(namespace, workload, ratesInterval string, queryTime time.Time) (model.Vector, model.Vector, error) { + return getItemRequestRates(in.api, namespace, workload, "workload", queryTime, ratesInterval) +} + +// FetchRange fetches a simple metric (gauge or counter) in given range +func (in *Client) FetchRange(metricName, labels, grouping, aggregator string, q *BaseMetricsQuery) *Metric { + query := fmt.Sprintf("%s(%s%s)", aggregator, metricName, labels) + if grouping != "" { + query += fmt.Sprintf(" by (%s)", grouping) + } + query = roundSignificant(query, 0.001) + return fetchRange(in.api, query, q.Range) +} + +// FetchRateRange fetches a counter's rate in given range +func (in *Client) FetchRateRange(metricName, labels, grouping string, q *BaseMetricsQuery) *Metric { + return fetchRateRange(in.api, metricName, labels, grouping, q) +} + +// FetchHistogramRange fetches bucketed metric as histogram in given range +func (in *Client) FetchHistogramRange(metricName, labels, grouping string, q *BaseMetricsQuery) Histogram { + return fetchHistogramRange(in.api, metricName, labels, grouping, q) +} + +// API returns the Prometheus V1 HTTP API for performing calls not supported natively by this client +func (in *Client) API() v1.API { + return in.api +} + +// Address return the configured Prometheus service URL +func (in *Client) Address() string { + return config.Get().ExternalServices.PrometheusServiceURL +} + +func (in *Client) GetConfiguration() (v1.ConfigResult, error) { + config, err := in.API().Config(context.Background()) + if err != nil { + return v1.ConfigResult{}, err + } + return config, nil +} + +func (in *Client) GetFlags() (v1.FlagsResult, error) { + flags, err := in.API().Flags(context.Background()) + if err != nil { + return nil, err + } + return flags, nil +} diff --git a/vendor/github.com/kiali/kiali/prometheus/internalmetrics/internal_metrics.go b/vendor/github.com/kiali/kiali/prometheus/internalmetrics/internal_metrics.go new file mode 100644 index 000000000..a38e0a509 --- /dev/null +++ b/vendor/github.com/kiali/kiali/prometheus/internalmetrics/internal_metrics.go @@ -0,0 +1,269 @@ +// Package internalmetrics provides functionality to collect Prometheus metrics. +package internalmetrics + +import ( + "strconv" + + "github.com/prometheus/client_golang/prometheus" + // Because this package is used all throughout the codebase, be VERY careful adding new + // kiali imports here. Most likely you will encounter an import cycle error that will + // cause a compilation failure. +) + +// These constants define the different label names for the different metric timeseries +const ( + labelGraphKind = "graph_kind" + labelGraphType = "graph_type" + labelWithServiceNodes = "with_service_nodes" + labelAppender = "appender" + labelRoute = "route" + labelQueryGroup = "query_group" + labelPackage = "package" + labelType = "type" + labelFunction = "function" +) + +// MetricsType defines all of Kiali's own internal metrics. +type MetricsType struct { + GraphNodes *prometheus.GaugeVec + GraphGenerationTime *prometheus.HistogramVec + GraphAppenderTime *prometheus.HistogramVec + GraphMarshalTime *prometheus.HistogramVec + APIProcessingTime *prometheus.HistogramVec + PrometheusProcessingTime *prometheus.HistogramVec + GoFunctionProcessingTime *prometheus.HistogramVec + GoFunctionFailures *prometheus.CounterVec +} + +// Metrics contains all of Kiali's own internal metrics. 
+// These metrics can be accessed directly to update their values, or +// you can use available utility functions defined below. +var Metrics = MetricsType{ + GraphNodes: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "kiali_graph_nodes", + Help: "The number of nodes in a generated graph.", + }, + []string{labelGraphKind, labelGraphType, labelWithServiceNodes}, + ), + GraphGenerationTime: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "kiali_graph_generation_duration_seconds", + Help: "The time required to generate a graph.", + }, + []string{labelGraphKind, labelGraphType, labelWithServiceNodes}, + ), + GraphAppenderTime: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "kiali_graph_appender_duration_seconds", + Help: "The time required to execute an appender while generating a graph.", + }, + []string{labelAppender}, + ), + GraphMarshalTime: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "kiali_graph_marshal_duration_seconds", + Help: "The time required to marshal and return the JSON for a graph.", + }, + []string{labelGraphKind, labelGraphType, labelWithServiceNodes}, + ), + APIProcessingTime: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "kiali_api_processing_duration_seconds", + Help: "The time required to execute a particular REST API route request.", + }, + []string{labelRoute}, + ), + PrometheusProcessingTime: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "kiali_prometheus_processing_duration_seconds", + Help: "The time required to execute a Prometheus query.", + }, + []string{labelQueryGroup}, + ), + GoFunctionProcessingTime: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "kiali_go_function_processing_duration_seconds", + Help: "The time required to execute a particular Go function.", + }, + []string{labelPackage, labelType, labelFunction}, + ), + GoFunctionFailures: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "kiali_go_function_failures_total", + Help: "Counts the total number of failures encountered by a particular Go function.", + }, + []string{labelPackage, labelType, labelFunction}, + ), +} + +// SuccessOrFailureMetricType let's you capture metrics for both successes and failures, +// where successes are tracked using a duration histogram and failures are tracked with a counter. +// Typical usage is: +// func SomeFunction(...) (..., err error) { +// sof := GetSuccessOrFailureMetricTypeObject() +// defer sof.ObserveNow(&err) +// ... do the work of SomeFunction here... +// } +// +// If a function doesn't support returning an error, then call ObserveDuration directly: +// +// func SomeFunction(...) (...) { +// sof := GetSuccessOrFailureMetricTypeObject() +// defer sof.ObserveDuration() +// ... do the work of SomeFunction here... +// +// If a function doesn't support returning an error, but you still need to report a failure, +// call Inc() directly to increment the failure counter: +// +// func SomeFunction(...) (...) { +// sof := GetSuccessOrFailureMetricTypeObject() +// defer func() { if (somethingBadHappened) { sof.Inc() } else { sof.ObserveDuration() }}() +// ... do the work of SomeFunction here... +// } +type SuccessOrFailureMetricType struct { + *prometheus.Timer + prometheus.Counter +} + +// ObserveNow will observe a duration unless *err is not nil +// in which case the error counter will be incremented instead. 
+// We use a pointer to err because this function is normally +// invoked via 'defer' and so the actual value of the error +// won't be set until the actual invocation of this function. +// (read the docs on 'defer' if you don't get it). +func (sof *SuccessOrFailureMetricType) ObserveNow(err *error) { + if *err == nil { + sof.ObserveDuration() + } else { + sof.Inc() + } +} + +// RegisterInternalMetrics must be called at startup to prepare the Prometheus scrape endpoint. +func RegisterInternalMetrics() { + prometheus.MustRegister( + Metrics.GraphNodes, + Metrics.GraphGenerationTime, + Metrics.GraphAppenderTime, + Metrics.GraphMarshalTime, + Metrics.APIProcessingTime, + Metrics.PrometheusProcessingTime, + Metrics.GoFunctionProcessingTime, + Metrics.GoFunctionFailures, + ) +} + +// +// The following are utility functions that can be used to update the internal metrics. +// + +// SetGraphNodes sets the node count metric +func SetGraphNodes(graphKind string, graphType string, withServiceNodes bool, nodeCount int) { + Metrics.GraphNodes.With(prometheus.Labels{ + labelGraphKind: graphKind, + labelGraphType: graphType, + labelWithServiceNodes: strconv.FormatBool(withServiceNodes), + }).Set(float64(nodeCount)) +} + +// GetGraphGenerationTimePrometheusTimer returns a timer that can be used to store +// a value for the graph generation time metric. The timer is ticking immediately +// when this function returns. +// Typical usage is as follows: +// promtimer := GetGraphGenerationTimePrometheusTimer(...) +// defer promtimer.ObserveDuration() +func GetGraphGenerationTimePrometheusTimer(graphKind string, graphType string, withServiceNodes bool) *prometheus.Timer { + timer := prometheus.NewTimer(Metrics.GraphGenerationTime.With(prometheus.Labels{ + labelGraphKind: graphKind, + labelGraphType: graphType, + labelWithServiceNodes: strconv.FormatBool(withServiceNodes), + })) + return timer +} + +// GetGraphAppenderTimePrometheusTimer returns a timer that can be used to store +// a value for the graph appender time metric. The timer is ticking immediately +// when this function returns. +// Typical usage is as follows: +// promtimer := GetGraphAppenderTimePrometheusTimer(...) +// ... run the appender ... +// promtimer.ObserveDuration() +func GetGraphAppenderTimePrometheusTimer(appenderName string) *prometheus.Timer { + timer := prometheus.NewTimer(Metrics.GraphAppenderTime.With(prometheus.Labels{ + labelAppender: appenderName, + })) + return timer +} + +// GetGraphMarshalTimePrometheusTimer returns a timer that can be used to store +// a value for the graph marshal time metric. The timer is ticking immediately +// when this function returns. +// Typical usage is as follows: +// promtimer := GetGraphMarshalTimePrometheusTimer(...) +// defer promtimer.ObserveDuration() +func GetGraphMarshalTimePrometheusTimer(graphKind string, graphType string, withServiceNodes bool) *prometheus.Timer { + timer := prometheus.NewTimer(Metrics.GraphMarshalTime.With(prometheus.Labels{ + labelGraphKind: graphKind, + labelGraphType: graphType, + labelWithServiceNodes: strconv.FormatBool(withServiceNodes), + })) + return timer +} + +// GetAPIProcessingTimePrometheusTimer returns a timer that can be used to store +// a value for the API processing time metric. The timer is ticking immediately +// when this function returns. +// Typical usage is as follows: +// promtimer := GetAPIProcessingTimePrometheusTimer(...) 
+// defer promtimer.ObserveDuration() +func GetAPIProcessingTimePrometheusTimer(apiRouteName string) *prometheus.Timer { + timer := prometheus.NewTimer(Metrics.APIProcessingTime.With(prometheus.Labels{ + labelRoute: apiRouteName, + })) + return timer +} + +// GetPrometheusProcessingTimePrometheusTimer returns a timer that can be used to store +// a value for the Prometheus query processing time metric. The timer is ticking immediately +// when this function returns. +// +// Note that the queryGroup parameter is simply some string that can be used to +// identify a particular set of Prometheus queries. This queryGroup does not necessarily have to +// identify a unique query (indeed, if you do that, that might cause too many timeseries to +// be collected), but it only needs to identify a set of queries. For example, perhaps there +// is a group of similar Prometheus queries used to generate a graph - in this case, +// the processing time for all of those queries can be combined into a single metric timeseries +// by passing in a queryGroup of "Graph-Generation". +// +// Typical usage is as follows: +// promtimer := GetPrometheusProcessingTimePrometheusTimer(...) +// ... execute the query ... +// promtimer.ObserveDuration() +func GetPrometheusProcessingTimePrometheusTimer(queryGroup string) *prometheus.Timer { + timer := prometheus.NewTimer(Metrics.PrometheusProcessingTime.With(prometheus.Labels{ + labelQueryGroup: queryGroup, + })) + return timer +} + +// GetGoFunctionMetric returns a SuccessOrFailureMetricType object that can be used to store +// a duration value for the Go Function processing time metric when the function is successful, +// or increments the failure counter if not successful. +// If the Go Function is not on a type (i.e. is a global function), pass in an empty string for goType. +// The timer is ticking immediately when this function returns. +// See the comments for SuccessOrFailureMetricType for documentation on how to use the returned object. 
+func GetGoFunctionMetric(goPkg string, goType string, goFunc string) SuccessOrFailureMetricType { + return SuccessOrFailureMetricType{ + prometheus.NewTimer(Metrics.GoFunctionProcessingTime.With(prometheus.Labels{ + labelPackage: goPkg, + labelType: goType, + labelFunction: goFunc, + })), + Metrics.GoFunctionFailures.With(prometheus.Labels{ + labelPackage: goPkg, + labelType: goType, + labelFunction: goFunc, + }), + } +} diff --git a/vendor/github.com/kiali/kiali/prometheus/metrics.go b/vendor/github.com/kiali/kiali/prometheus/metrics.go new file mode 100644 index 000000000..c7af9926d --- /dev/null +++ b/vendor/github.com/kiali/kiali/prometheus/metrics.go @@ -0,0 +1,273 @@ +package prometheus + +import ( + "context" + "fmt" + "regexp" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/model" + + "github.com/kiali/kiali/log" + "github.com/kiali/kiali/prometheus/internalmetrics" +) + +var ( + invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) +) + +func getMetrics(api v1.API, q *IstioMetricsQuery) Metrics { + labels, labelsError := buildLabelStrings(q) + grouping := strings.Join(q.ByLabels, ",") + metrics := fetchAllMetrics(api, q, labels, labelsError, grouping) + return metrics +} + +func buildLabelStrings(q *IstioMetricsQuery) (string, string) { + labels := []string{fmt.Sprintf(`reporter="%s"`, q.Reporter)} + ref := "destination" + if q.Direction == "outbound" { + ref = "source" + } + + if q.Service != "" { + // inbound only + labels = append(labels, fmt.Sprintf(`destination_service_name="%s"`, q.Service)) + if q.Namespace != "" { + labels = append(labels, fmt.Sprintf(`destination_service_namespace="%s"`, q.Namespace)) + } + } else if q.Namespace != "" { + labels = append(labels, fmt.Sprintf(`%s_workload_namespace="%s"`, ref, q.Namespace)) + } + if q.Workload != "" { + labels = append(labels, fmt.Sprintf(`%s_workload="%s"`, ref, q.Workload)) + } + if q.App != "" { + labels = append(labels, fmt.Sprintf(`%s_app="%s"`, ref, q.App)) + } + if q.RequestProtocol != "" { + labels = append(labels, fmt.Sprintf(`request_protocol="%s"`, q.RequestProtocol)) + } + + full := "{" + strings.Join(labels, ",") + "}" + + labels = append(labels, `response_code=~"[5|4].*"`) + errors := "{" + strings.Join(labels, ",") + "}" + + return full, errors +} + +func fetchAllMetrics(api v1.API, q *IstioMetricsQuery, labels, labelsError, grouping string) Metrics { + var wg sync.WaitGroup + fetchRate := func(p8sFamilyName string, metric **Metric, lbl string) { + defer wg.Done() + m := fetchRateRange(api, p8sFamilyName, lbl, grouping, &q.BaseMetricsQuery) + *metric = m + } + + fetchHisto := func(p8sFamilyName string, histo *Histogram) { + defer wg.Done() + h := fetchHistogramRange(api, p8sFamilyName, labels, grouping, &q.BaseMetricsQuery) + *histo = h + } + + type resultHolder struct { + metric *Metric + histo Histogram + definition istioMetric + } + maxResults := len(istioMetrics) + results := make([]*resultHolder, maxResults, maxResults) + + for i, istioMetric := range istioMetrics { + // if filters is empty, fetch all anyway + doFetch := len(q.Filters) == 0 + if !doFetch { + for _, filter := range q.Filters { + if filter == istioMetric.kialiName { + doFetch = true + break + } + } + } + if doFetch { + wg.Add(1) + result := resultHolder{definition: istioMetric} + results[i] = &result + if istioMetric.isHisto { + go fetchHisto(istioMetric.istioName, &result.histo) + } else { + labelsToUse := istioMetric.labelsToUse(labels, labelsError) + go 
fetchRate(istioMetric.istioName, &result.metric, labelsToUse) + } + } + } + wg.Wait() + + // Return results as two maps per reporter + metrics := make(map[string]*Metric) + histograms := make(map[string]Histogram) + for _, result := range results { + if result != nil { + if result.definition.isHisto { + histograms[result.definition.kialiName] = result.histo + } else { + metrics[result.definition.kialiName] = result.metric + } + } + } + return Metrics{ + Metrics: metrics, + Histograms: histograms, + } +} + +func fetchRateRange(api v1.API, metricName, labels, grouping string, q *BaseMetricsQuery) *Metric { + var query string + // Example: round(sum(rate(my_counter{foo=bar}[5m])) by (baz), 0.001) + if grouping == "" { + query = fmt.Sprintf("sum(%s(%s%s[%s]))", q.RateFunc, metricName, labels, q.RateInterval) + } else { + query = fmt.Sprintf("sum(%s(%s%s[%s])) by (%s)", q.RateFunc, metricName, labels, q.RateInterval, grouping) + } + query = roundSignificant(query, 0.001) + return fetchRange(api, query, q.Range) +} + +func fetchHistogramRange(api v1.API, metricName, labels, grouping string, q *BaseMetricsQuery) Histogram { + histogram := make(Histogram) + + // Note: the p8s queries are not run in parallel here, but they are at the caller's place. + // This is because we may not want to create too many threads in the lowest layer + if q.Avg { + groupingAvg := "" + if grouping != "" { + groupingAvg = fmt.Sprintf(" by (%s)", grouping) + } + // Average + // Example: sum(rate(my_histogram_sum{foo=bar}[5m])) by (baz) / sum(rate(my_histogram_count{foo=bar}[5m])) by (baz) + query := fmt.Sprintf("sum(rate(%s_sum%s[%s]))%s / sum(rate(%s_count%s[%s]))%s", + metricName, labels, q.RateInterval, groupingAvg, metricName, labels, q.RateInterval, groupingAvg) + query = roundSignificant(query, 0.001) + log.Infof("Query: %s\n", query) + histogram["avg"] = fetchRange(api, query, q.Range) + } + + groupingQuantile := "" + if grouping != "" { + groupingQuantile = fmt.Sprintf(",%s", grouping) + } + for _, quantile := range q.Quantiles { + // Example: round(histogram_quantile(0.5, sum(rate(my_histogram_bucket{foo=bar}[5m])) by (le,baz)), 0.001) + query := fmt.Sprintf("histogram_quantile(%s, sum(rate(%s_bucket%s[%s])) by (le%s))", + quantile, metricName, labels, q.RateInterval, groupingQuantile) + query = roundSignificant(query, 0.001) + histogram[quantile] = fetchRange(api, query, q.Range) + log.Infof("Query: %s\n", query) + } + + return histogram +} + +func fetchTimestamp(api v1.API, query string, t time.Time) (model.Vector, error) { + result, err := api.Query(context.Background(), query, t) + if err != nil { + return nil, err + } + switch result.Type() { + case model.ValVector: + return result.(model.Vector), nil + } + return nil, fmt.Errorf("Invalid query, vector expected: %s", query) +} + +func fetchRange(api v1.API, query string, bounds v1.Range) *Metric { + result, err := api.QueryRange(context.Background(), query, bounds) + if err != nil { + return &Metric{err: err} + } + switch result.Type() { + case model.ValMatrix: + return &Metric{Matrix: result.(model.Matrix)} + } + return &Metric{err: fmt.Errorf("Invalid query, matrix expected: %s", query)} +} + +func replaceInvalidCharacters(metricName string) string { + // See https://github.com/prometheus/prometheus/blob/master/util/strutil/strconv.go#L43 + return invalidLabelCharRE.ReplaceAllString(metricName, "_") +} + +// getAllRequestRates retrieves traffic rates for requests entering, internal to, or exiting the namespace. 
+// Uses source telemetry unless working on the Istio namespace. +func getAllRequestRates(api v1.API, namespace string, queryTime time.Time, ratesInterval string) (model.Vector, error) { + // traffic originating outside the namespace to destinations inside the namespace + lbl := fmt.Sprintf(`destination_service_namespace="%s",source_workload_namespace!="%s"`, namespace, namespace) + fromOutside, err := getRequestRatesForLabel(api, queryTime, lbl, ratesInterval) + if err != nil { + return model.Vector{}, err + } + // traffic originating inside the namespace to destinations inside or outside the namespace + lbl = fmt.Sprintf(`source_workload_namespace="%s"`, namespace) + fromInside, err := getRequestRatesForLabel(api, queryTime, lbl, ratesInterval) + if err != nil { + return model.Vector{}, err + } + // Merge results + all := append(fromOutside, fromInside...) + return all, nil +} + +// getNamespaceServicesRequestRates retrieves traffic rates for requests entering or internal to the namespace. +// Uses source telemetry unless working on the Istio namespace. +func getNamespaceServicesRequestRates(api v1.API, namespace string, queryTime time.Time, ratesInterval string) (model.Vector, error) { + // traffic for the namespace services + lblNs := fmt.Sprintf(`destination_service_namespace="%s"`, namespace) + ns, err := getRequestRatesForLabel(api, queryTime, lblNs, ratesInterval) + if err != nil { + return model.Vector{}, err + } + return ns, nil +} + +func getServiceRequestRates(api v1.API, namespace, service string, queryTime time.Time, ratesInterval string) (model.Vector, error) { + lbl := fmt.Sprintf(`destination_service_name="%s",destination_service_namespace="%s"`, service, namespace) + in, err := getRequestRatesForLabel(api, queryTime, lbl, ratesInterval) + if err != nil { + return model.Vector{}, err + } + return in, nil +} + +func getItemRequestRates(api v1.API, namespace, item, itemLabelSuffix string, queryTime time.Time, ratesInterval string) (model.Vector, model.Vector, error) { + lblIn := fmt.Sprintf(`destination_workload_namespace="%s",destination_%s="%s"`, namespace, itemLabelSuffix, item) + lblOut := fmt.Sprintf(`source_workload_namespace="%s",source_%s="%s"`, namespace, itemLabelSuffix, item) + in, err := getRequestRatesForLabel(api, queryTime, lblIn, ratesInterval) + if err != nil { + return model.Vector{}, model.Vector{}, err + } + out, err := getRequestRatesForLabel(api, queryTime, lblOut, ratesInterval) + if err != nil { + return model.Vector{}, model.Vector{}, err + } + return in, out, nil +} + +func getRequestRatesForLabel(api v1.API, time time.Time, labels, ratesInterval string) (model.Vector, error) { + query := fmt.Sprintf("rate(istio_requests_total{%s}[%s])", labels, ratesInterval) + promtimer := internalmetrics.GetPrometheusProcessingTimePrometheusTimer("Metrics-GetRequestRates") + result, err := api.Query(context.Background(), query, time) + if err != nil { + return model.Vector{}, err + } + promtimer.ObserveDuration() // notice we only collect metrics for successful prom queries + return result.(model.Vector), nil +} + +// roundSignificant will output promQL that performs rounding only if the resulting value is significant, that is, higher than the requested precision +func roundSignificant(innerQuery string, precision float64) string { + return fmt.Sprintf("round(%s, %f) > %f or %s", innerQuery, precision, precision, innerQuery) +} diff --git a/vendor/github.com/kiali/kiali/prometheus/metrics_definitions.go 
b/vendor/github.com/kiali/kiali/prometheus/metrics_definitions.go new file mode 100644 index 000000000..be5c3be6d --- /dev/null +++ b/vendor/github.com/kiali/kiali/prometheus/metrics_definitions.go @@ -0,0 +1,54 @@ +package prometheus + +type istioMetric struct { + kialiName string + istioName string + isHisto bool + useErrorLabels bool +} + +var istioMetrics = []istioMetric{ + istioMetric{ + kialiName: "request_count", + istioName: "istio_requests_total", + isHisto: false, + }, + istioMetric{ + kialiName: "request_error_count", + istioName: "istio_requests_total", + isHisto: false, + useErrorLabels: true, + }, + istioMetric{ + kialiName: "request_duration", + istioName: "istio_request_duration_seconds", + isHisto: true, + }, + istioMetric{ + kialiName: "request_size", + istioName: "istio_request_bytes", + isHisto: true, + }, + istioMetric{ + kialiName: "response_size", + istioName: "istio_response_bytes", + isHisto: true, + }, + istioMetric{ + kialiName: "tcp_received", + istioName: "istio_tcp_received_bytes_total", + isHisto: false, + }, + istioMetric{ + kialiName: "tcp_sent", + istioName: "istio_tcp_sent_bytes_total", + isHisto: false, + }, +} + +func (in *istioMetric) labelsToUse(labels, labelsError string) string { + if in.useErrorLabels { + return labelsError + } + return labels +} diff --git a/vendor/github.com/kiali/kiali/prometheus/types.go b/vendor/github.com/kiali/kiali/prometheus/types.go new file mode 100644 index 000000000..cc1c8879a --- /dev/null +++ b/vendor/github.com/kiali/kiali/prometheus/types.go @@ -0,0 +1,78 @@ +package prometheus + +import ( + "time" + + "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/model" +) + +// BaseMetricsQuery holds common parameters for all kinds of queries +type BaseMetricsQuery struct { + v1.Range + RateInterval string + RateFunc string + Quantiles []string + Avg bool + ByLabels []string +} + +// FillDefaults fills the struct with default parameters +func (q *BaseMetricsQuery) fillDefaults() { + q.End = time.Now() + q.Start = q.End.Add(-30 * time.Minute) + q.Step = 15 * time.Second + q.RateInterval = "1m" + q.RateFunc = "rate" + q.Avg = true +} + +// IstioMetricsQuery holds query parameters for a typical metrics query +type IstioMetricsQuery struct { + BaseMetricsQuery + Filters []string + Namespace string + App string + Workload string + Service string + Direction string // outbound | inbound + RequestProtocol string // e.g. 
http | grpc + Reporter string // source | destination, defaults to source if not provided +} + +// FillDefaults fills the struct with default parameters +func (q *IstioMetricsQuery) FillDefaults() { + q.BaseMetricsQuery.fillDefaults() + q.Reporter = "source" + q.Direction = "outbound" +} + +// CustomMetricsQuery holds query parameters for a custom metrics query +type CustomMetricsQuery struct { + BaseMetricsQuery + Namespace string + App string + Version string + RawDataAggregator string +} + +// FillDefaults fills the struct with default parameters +func (q *CustomMetricsQuery) FillDefaults() { + q.BaseMetricsQuery.fillDefaults() + q.RawDataAggregator = "sum" +} + +// Metrics contains all simple metrics and histograms data +type Metrics struct { + Metrics map[string]*Metric `json:"metrics"` + Histograms map[string]Histogram `json:"histograms"` +} + +// Metric holds the Prometheus Matrix model, which contains one or more time series (depending on grouping) +type Metric struct { + Matrix model.Matrix `json:"matrix"` + err error +} + +// Histogram contains Metric objects for several histogram-kind statistics +type Histogram = map[string]*Metric diff --git a/vendor/github.com/kiali/kiali/status/mtls_status.go b/vendor/github.com/kiali/kiali/status/mtls_status.go new file mode 100644 index 000000000..e9d10df59 --- /dev/null +++ b/vendor/github.com/kiali/kiali/status/mtls_status.go @@ -0,0 +1,33 @@ +package status + +import ( + "github.com/kiali/kiali/business" +) + +func (si *StatusInfo) getmTLSStatus() { + // Get business layer + business, err := business.Get() + if err != nil { + Put(ClusterMTLS, "error") + return + } + + namespaces, err := business.Namespace.GetNamespaces() + if err != nil { + Put(ClusterMTLS, "error") + return + } + + nsNames := make([]string, 0, len(namespaces)) + for _, ns := range namespaces { + nsNames = append(nsNames, ns.Name) + } + + globalmTLSStatus, err := business.IstioConfig.MeshWidemTLSStatus(nsNames) + if err != nil { + Put(ClusterMTLS, "error") + return + } + + Put(ClusterMTLS, globalmTLSStatus) +} diff --git a/vendor/github.com/kiali/kiali/status/status.go b/vendor/github.com/kiali/kiali/status/status.go new file mode 100644 index 000000000..7a20379f5 --- /dev/null +++ b/vendor/github.com/kiali/kiali/status/status.go @@ -0,0 +1,82 @@ +// status is a simple package for offering up various status information from Kiali. 
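+// An illustrative flow using the helpers defined below: call Put(name, value) to record
+// or replace a single status entry, and Get() to obtain the current StatusInfo snapshot.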
+package status + +const ( + name = "Kiali" + ConsoleVersion = name + " console version" + CoreVersion = name + " core version" + CoreCommitHash = name + " core commit hash" + State = name + " state" + ClusterMTLS = "Istio mTLS" + StateRunning = "running" +) + +// StatusInfo statusInfo +// +// This is used for returning a response of Kiali Status +// +// swagger:model StatusInfo +type StatusInfo struct { + // The state of Kiali + // A hash of key,values with versions of Kiali and state + // + // required: true + Status map[string]string `json:"status"` + // An array of external services installed + // + // required: true + // swagger:allOf + ExternalServices []ExternalServiceInfo `json:"externalServices"` + // An array of warningMessages + // items.example: Istio version 0.7.1 is not supported, the version should be 0.8.0 + // swagger:allOf + WarningMessages []string `json:"warningMessages"` +} + +var info StatusInfo + +// Status response model +// +// This is used for returning a response of Kiali Status +// +// swagger:model externalServiceInfo +type ExternalServiceInfo struct { + // The name of the service + // + // required: true + // example: Istio + Name string `json:"name"` + + // The installed version of the service + // + // required: false + // example: 0.8.0 + Version string `json:"version,omitempty"` + + // The service url + // + // required: false + // example: jaeger-query-istio-system.127.0.0.1.nip.io + Url string `json:"url,omitempty"` +} + +func init() { + info = StatusInfo{Status: make(map[string]string)} + info.Status[State] = StateRunning +} + +// Put adds or replaces status info for the provided name. Any previous setting is returned. +func Put(name, value string) (previous string, hasPrevious bool) { + previous, hasPrevious = info.Status[name] + info.Status[name] = value + return previous, hasPrevious +} + +// Get returns a copy of the current status info. 
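+// Note that, as implemented below, it also refreshes the external service info,
+// warning messages, and the mesh-wide mTLS status before returning.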
+func Get() (status StatusInfo) { + info.ExternalServices = []ExternalServiceInfo{} + info.WarningMessages = []string{} + info.getmTLSStatus() + getVersions() + return info +} diff --git a/vendor/github.com/kiali/kiali/status/versions.go b/vendor/github.com/kiali/kiali/status/versions.go new file mode 100644 index 000000000..f7505fc94 --- /dev/null +++ b/vendor/github.com/kiali/kiali/status/versions.go @@ -0,0 +1,244 @@ +package status + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "regexp" + "strings" + + "github.com/hashicorp/go-version" + kversion "k8s.io/apimachinery/pkg/version" + kube "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/kubernetes" + "github.com/kiali/kiali/log" +) + +type externalService func() (*ExternalServiceInfo, error) + +var ( + // Example Maistra product version is: + // redhat@redhat-docker.io/maistra-0.1.0-1-3a136c90ec5e308f236e0d7ebb5c4c5e405217f4-unknown + // Example Maistra upstream project version is: + // redhat@redhat-pulp.abc.xyz.redhat.com:8888/openshift-istio-tech-preview-0.1.0-1-3a136c90ec5e308f236e0d7ebb5c4c5e405217f4-Custom + // Example Istio snapshot version is: + // root@f72e3d3ef3c2-docker.io/istio-release-1.0-20180927-21-10-cbe9c05c470ec1924f7bcf02334b183e7e6175cb-Clean + maistraProductVersionExpr = regexp.MustCompile("maistra-([0-9]+\\.[0-9]+\\.[0-9]+)") + maistraProjectVersionExpr = regexp.MustCompile("openshift-istio.*-([0-9]+\\.[0-9]+\\.[0-9]+)") + istioVersionExpr = regexp.MustCompile("([0-9]+\\.[0-9]+\\.[0-9]+)") + istioSnapshotVersionExpr = regexp.MustCompile("istio-release-([0-9]+\\.[0-9]+)(-[0-9]{8})") +) + +func getVersions() { + components := []externalService{ + istioVersion, + prometheusVersion, + kubernetesVersion, + } + + if config.Get().ExternalServices.Jaeger.URL != "" { + components = append(components, jaegerVersion) + } + + if config.Get().ExternalServices.Grafana.URL != "" { + components = append(components, grafanaVersion) + } + + for _, comp := range components { + getVersionComponent(comp) + } +} + +func getVersionComponent(serviceComponent externalService) { + componentInfo, err := serviceComponent() + if err == nil { + info.ExternalServices = append(info.ExternalServices, *componentInfo) + } +} + +func validateVersion(istioReq string, installedVersion string) bool { + reqWords := strings.Split(istioReq, " ") + requirementV, errReqV := version.NewVersion(reqWords[1]) + installedV, errInsV := version.NewVersion(installedVersion) + if errReqV != nil || errInsV != nil { + return false + } + switch operator := reqWords[0]; operator { + case "==": + return installedV.Equal(requirementV) + case ">=": + return installedV.GreaterThan(requirementV) || installedV.Equal(requirementV) + case ">": + return installedV.GreaterThan(requirementV) + case "<=": + return installedV.LessThan(requirementV) || installedV.Equal(requirementV) + case "<": + return installedV.LessThan(requirementV) + } + return false +} + +func istioVersion() (*ExternalServiceInfo, error) { + var ( + body []byte + err error + product *ExternalServiceInfo + resp *http.Response + ) + + istioConfig := config.Get().ExternalServices.Istio + resp, err = http.Get(istioConfig.UrlServiceVersion) + if err == nil { + defer resp.Body.Close() + body, err = ioutil.ReadAll(resp.Body) + if err == nil { + rawVersion := string(body) + product, err = parseIstioRawVersion(rawVersion) + return product, err + } + } + return nil, err +} + +func parseIstioRawVersion(rawVersion string) (*ExternalServiceInfo, 
error) { + product := ExternalServiceInfo{Name: "Unknown", Version: "Unknown"} + + // First see if we detect Maistra (either product or upstream project). + // If it is not Maistra, see if it is upstream Istio (either a release or snapshot). + // If it is neither then it is some unknown Istio implementation that we do not support. + + maistraVersionStringArr := maistraProductVersionExpr.FindStringSubmatch(rawVersion) + if maistraVersionStringArr != nil { + log.Debugf("Detected Maistra product version [%v]", rawVersion) + if len(maistraVersionStringArr) > 1 { + product.Name = "Maistra" + product.Version = maistraVersionStringArr[1] // get regex group #1 ,which is the "#.#.#" version string + if !validateVersion(config.MaistraVersionSupported, product.Version) { + info.WarningMessages = append(info.WarningMessages, "Maistra version "+product.Version+" is not supported, the version should be "+config.MaistraVersionSupported) + } + + // we know this is Maistra - either a supported or unsupported version - return now + return &product, nil + } + } + + maistraVersionStringArr = maistraProjectVersionExpr.FindStringSubmatch(rawVersion) + if maistraVersionStringArr != nil { + log.Debugf("Detected Maistra project version [%v]", rawVersion) + if len(maistraVersionStringArr) > 1 { + product.Name = "Maistra Project" + product.Version = maistraVersionStringArr[1] // get regex group #1 ,which is the "#.#.#" version string + if !validateVersion(config.MaistraVersionSupported, product.Version) { + info.WarningMessages = append(info.WarningMessages, "Maistra project version "+product.Version+" is not supported, the version should be "+config.MaistraVersionSupported) + } + + // we know this is Maistra - either a supported or unsupported version - return now + return &product, nil + } + } + + // see if it is a released version of Istio + istioVersionStringArr := istioVersionExpr.FindStringSubmatch(rawVersion) + if istioVersionStringArr != nil { + log.Debugf("Detected Istio version [%v]", rawVersion) + if len(istioVersionStringArr) > 1 { + product.Name = "Istio" + product.Version = istioVersionStringArr[1] // get regex group #1 ,which is the "#.#.#" version string + if !validateVersion(config.IstioVersionSupported, product.Version) { + info.WarningMessages = append(info.WarningMessages, "Istio version "+product.Version+" is not supported, the version should be "+config.IstioVersionSupported) + } + // we know this is Istio upstream - either a supported or unsupported version - return now + return &product, nil + } + } + + // see if it is a snapshot version of Istio + istioVersionStringArr = istioSnapshotVersionExpr.FindStringSubmatch(rawVersion) + if istioVersionStringArr != nil { + log.Debugf("Detected Istio snapshot version [%v]", rawVersion) + if len(istioVersionStringArr) > 2 { + product.Name = "Istio Snapshot" + majorMinor := istioVersionStringArr[1] // regex group #1 is the "#.#" version numbers + snapshotStr := istioVersionStringArr[2] // regex group #2 is the date/time stamp + product.Version = majorMinor + snapshotStr + if !validateVersion(config.IstioVersionSupported, majorMinor) { + info.WarningMessages = append(info.WarningMessages, "Istio snapshot version "+product.Version+" is not supported, the version should be "+config.IstioVersionSupported) + } + // we know this is Istio upstream - either a supported or unsupported version - return now + return &product, nil + } + } + + log.Debugf("Detected unknown Istio implementation version [%v]", rawVersion) + product.Name = "Unknown Istio Implementation" 
+ product.Version = rawVersion + info.WarningMessages = append(info.WarningMessages, "Unknown Istio implementation version "+product.Version+" is not recognized, thus not supported.") + return &product, nil +} + +type p8sResponseVersion struct { + Version string `json:"version"` + Revision string `json:"revision"` +} + +func jaegerVersion() (*ExternalServiceInfo, error) { + product := ExternalServiceInfo{} + product.Name = "Jaeger" + product.Url = config.Get().ExternalServices.Jaeger.URL + + return &product, nil +} + +func grafanaVersion() (*ExternalServiceInfo, error) { + product := ExternalServiceInfo{} + product.Name = "Grafana" + product.Url = config.Get().ExternalServices.Grafana.URL + + return &product, nil +} + +func prometheusVersion() (*ExternalServiceInfo, error) { + product := ExternalServiceInfo{} + prometheusV := new(p8sResponseVersion) + prometheusUrl := config.Get().ExternalServices.PrometheusServiceURL + resp, err := http.Get(prometheusUrl + "/version") + if err == nil { + defer resp.Body.Close() + err = json.NewDecoder(resp.Body).Decode(&prometheusV) + if err == nil { + product.Name = "Prometheus" + product.Version = prometheusV.Version + return &product, nil + } + } + return nil, err +} + +func kubernetesVersion() (*ExternalServiceInfo, error) { + var ( + err error + k8sConfig *rest.Config + k8s *kube.Clientset + serverVersion *kversion.Info + ) + + product := ExternalServiceInfo{} + k8sConfig, err = kubernetes.ConfigClient() + if err == nil { + k8sConfig.QPS = config.Get().KubernetesConfig.QPS + k8sConfig.Burst = config.Get().KubernetesConfig.Burst + k8s, err = kube.NewForConfig(k8sConfig) + if err == nil { + serverVersion, err = k8s.Discovery().ServerVersion() + if err == nil { + product.Name = "Kubernetes" + product.Version = serverVersion.GitVersion + return &product, nil + } + } + } + return nil, err +} diff --git a/vendor/github.com/kiali/kiali/util/clock.go b/vendor/github.com/kiali/kiali/util/clock.go new file mode 100644 index 000000000..f12f7fbbc --- /dev/null +++ b/vendor/github.com/kiali/kiali/util/clock.go @@ -0,0 +1,23 @@ +package util + +import "time" + +type TimeProvider interface { + Now() time.Time +} + +var Clock TimeProvider + +type RealClock struct{} + +func (clock RealClock) Now() time.Time { + return time.Now() +} + +type ClockMock struct { + Time time.Time +} + +func (clock ClockMock) Now() time.Time { + return clock.Time +} diff --git a/vendor/github.com/kiali/kiali/util/intutil/convert.go b/vendor/github.com/kiali/kiali/util/intutil/convert.go new file mode 100644 index 000000000..b171b1b5b --- /dev/null +++ b/vendor/github.com/kiali/kiali/util/intutil/convert.go @@ -0,0 +1,20 @@ +package intutil + +import "errors" + +func Convert(subject interface{}) (int, error) { + var result int + + switch subject.(type) { + case uint64: + result = int(subject.(uint64)) + case int64: + result = int(subject.(int64)) + case int: + result = subject.(int) + default: + return 0, errors.New("It is not a numeric input") + } + + return result, nil +} diff --git a/vendor/github.com/kiali/kiali/util/javascript_config.go b/vendor/github.com/kiali/kiali/util/javascript_config.go new file mode 100644 index 000000000..76bca916e --- /dev/null +++ b/vendor/github.com/kiali/kiali/util/javascript_config.go @@ -0,0 +1,24 @@ +package util + +import ( + "io/ioutil" + "path/filepath" + + "github.com/kiali/kiali/config" + "github.com/kiali/kiali/log" +) + +// ConfigToJS generates env.js file from Kiali config +func ConfigToJS() { + log.Info("Generating env.js from config") + path, 
_ := filepath.Abs("./console/env.js") + + content := "window.WEB_ROOT='" + config.Get().Server.WebRoot + "';" + + log.Debugf("The content of %v will be:\n%v", path, content) + + err := ioutil.WriteFile(path, []byte(content), 0) + if isError(err) { + return + } +} diff --git a/vendor/github.com/kiali/kiali/util/prometheus_query.go b/vendor/github.com/kiali/kiali/util/prometheus_query.go new file mode 100644 index 000000000..3d9e4026d --- /dev/null +++ b/vendor/github.com/kiali/kiali/util/prometheus_query.go @@ -0,0 +1,31 @@ +package util + +import ( + "fmt" + "time" + + "github.com/prometheus/common/model" +) + +func AdjustRateInterval(namespaceCreationTime time.Time, queryTime time.Time, ratesInterval string) (string, error) { + startTime, err := GetStartTimeForRateInterval(queryTime, ratesInterval) + if err != nil { + return "", err + } + + if startTime.Before(namespaceCreationTime) { + interval := queryTime.Sub(namespaceCreationTime) + ratesInterval = fmt.Sprintf("%vs", int(interval.Seconds())) + } + + return ratesInterval, nil +} + +func GetStartTimeForRateInterval(baseTime time.Time, rateInterval string) (time.Time, error) { + duration, err := model.ParseDuration(rateInterval) + if err != nil { + return time.Time{}, err + } + + return baseTime.Add(-time.Duration(duration)), nil +} diff --git a/vendor/github.com/kiali/kiali/util/update_base_url.go b/vendor/github.com/kiali/kiali/util/update_base_url.go new file mode 100644 index 000000000..0d43a7ec0 --- /dev/null +++ b/vendor/github.com/kiali/kiali/util/update_base_url.go @@ -0,0 +1,38 @@ +package util + +import ( + "io/ioutil" + "path/filepath" + "strings" + + "github.com/kiali/kiali/log" +) + +// UpdateBaseURL updates index.html base href with web root string +func UpdateBaseURL(webRootPath string) { + log.Infof("Updating base URL in index.html with [%v]", webRootPath) + path, _ := filepath.Abs("./console/index.html") + b, err := ioutil.ReadFile(path) + if isError(err) { + return + } + + html := string(b) + + searchStr := `` + newStr := `` + newHTML := strings.Replace(html, searchStr, newStr, -1) + + err = ioutil.WriteFile(path, []byte(newHTML), 0) + if isError(err) { + return + } +} + +func isError(err error) bool { + if err != nil { + log.Errorf("File I/O error [%v]", err.Error()) + } + + return (err != nil) +} diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore deleted file mode 100644 index daf913b1b..000000000 --- a/vendor/github.com/klauspost/cpuid/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml deleted file mode 100644 index 630192d59..000000000 --- a/vendor/github.com/klauspost/cpuid/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go - -sudo: false - -os: - - linux - - osx -go: - - 1.8.x - - 1.9.x - - 1.10.x - - master - -script: - - go vet ./... - - go test -v ./... - - go test -race ./... - - diff <(gofmt -d .) 
<("") - -matrix: - allow_failures: - - go: 'master' - fast_finish: true diff --git a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt deleted file mode 100644 index 452d28eda..000000000 --- a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt +++ /dev/null @@ -1,35 +0,0 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2015- Klaus Post & Contributors. -Email: klauspost@gmail.com - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md deleted file mode 100644 index b2b6bee87..000000000 --- a/vendor/github.com/klauspost/cpuid/README.md +++ /dev/null @@ -1,145 +0,0 @@ -# cpuid -Package cpuid provides information about the CPU running the current program. - -CPU features are detected on startup, and kept for fast access through the life of the application. -Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. - -You can access the CPU information by accessing the shared CPU variable of the cpuid library. 
- -Package home: https://github.com/klauspost/cpuid - -[![GoDoc][1]][2] [![Build Status][3]][4] - -[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg -[2]: https://godoc.org/github.com/klauspost/cpuid -[3]: https://travis-ci.org/klauspost/cpuid.svg -[4]: https://travis-ci.org/klauspost/cpuid - -# features -## CPU Instructions -* **CMOV** (i686 CMOV) -* **NX** (NX (No-Execute) bit) -* **AMD3DNOW** (AMD 3DNOW) -* **AMD3DNOWEXT** (AMD 3DNowExt) -* **MMX** (standard MMX) -* **MMXEXT** (SSE integer functions or AMD MMX ext) -* **SSE** (SSE functions) -* **SSE2** (P4 SSE functions) -* **SSE3** (Prescott SSE3 functions) -* **SSSE3** (Conroe SSSE3 functions) -* **SSE4** (Penryn SSE4.1 functions) -* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions) -* **SSE42** (Nehalem SSE4.2 functions) -* **AVX** (AVX functions) -* **AVX2** (AVX2 functions) -* **FMA3** (Intel FMA 3) -* **FMA4** (Bulldozer FMA4 functions) -* **XOP** (Bulldozer XOP functions) -* **F16C** (Half-precision floating-point conversion) -* **BMI1** (Bit Manipulation Instruction Set 1) -* **BMI2** (Bit Manipulation Instruction Set 2) -* **TBM** (AMD Trailing Bit Manipulation) -* **LZCNT** (LZCNT instruction) -* **POPCNT** (POPCNT instruction) -* **AESNI** (Advanced Encryption Standard New Instructions) -* **CLMUL** (Carry-less Multiplication) -* **HTT** (Hyperthreading (enabled)) -* **HLE** (Hardware Lock Elision) -* **RTM** (Restricted Transactional Memory) -* **RDRAND** (RDRAND instruction is available) -* **RDSEED** (RDSEED instruction is available) -* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) -* **SHA** (Intel SHA Extensions) -* **AVX512F** (AVX-512 Foundation) -* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions) -* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions) -* **AVX512PF** (AVX-512 Prefetch Instructions) -* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions) -* **AVX512CD** (AVX-512 Conflict Detection Instructions) -* **AVX512BW** (AVX-512 Byte and Word Instructions) -* **AVX512VL** (AVX-512 Vector Length Extensions) -* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions) -* **MPX** (Intel MPX (Memory Protection Extensions)) -* **ERMS** (Enhanced REP MOVSB/STOSB) -* **RDTSCP** (RDTSCP Instruction) -* **CX16** (CMPXCHG16B Instruction) -* **SGX** (Software Guard Extensions, with activation details) - -## Performance -* **RDTSCP()** Returns current cycle count. Can be used for benchmarking. -* **SSE2SLOW** (SSE2 is supported, but usually not faster) -* **SSE3SLOW** (SSE3 is supported, but usually not faster) -* **ATOM** (Atom processor, some SSSE3 instructions are slower) -* **Cache line** (Probable size of a cache line). -* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs. 
- -## Cpu Vendor/VM -* **Intel** -* **AMD** -* **VIA** -* **Transmeta** -* **NSC** -* **KVM** (Kernel-based Virtual Machine) -* **MSVM** (Microsoft Hyper-V or Windows Virtual PC) -* **VMware** -* **XenHVM** - -# installing - -```go get github.com/klauspost/cpuid``` - -# example - -```Go -package main - -import ( - "fmt" - "github.com/klauspost/cpuid" -) - -func main() { - // Print basic CPU information: - fmt.Println("Name:", cpuid.CPU.BrandName) - fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores) - fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore) - fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores) - fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model) - fmt.Println("Features:", cpuid.CPU.Features) - fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine) - fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes") - fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes") - fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes") - fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes") - - // Test if we have a specific feature: - if cpuid.CPU.SSE() { - fmt.Println("We have Streaming SIMD Extensions") - } -} -``` - -Sample output: -``` ->go run main.go -Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz -PhysicalCores: 2 -ThreadsPerCore: 2 -LogicalCores: 4 -Family 6 Model: 42 -Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL -Cacheline bytes: 64 -We have Streaming SIMD Extensions -``` - -# private package - -In the "private" folder you can find an autogenerated version of the library you can include in your own packages. - -For this purpose all exports are removed, and functions and constants are lowercased. - -This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages. - -# license - -This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/knative/pkg b/vendor/github.com/knative/pkg new file mode 160000 index 000000000..f8007289b --- /dev/null +++ b/vendor/github.com/knative/pkg @@ -0,0 +1 @@ +Subproject commit f8007289b228598dfdf7e6de67ee5f5c391c9d34 diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod deleted file mode 100644 index 716c61312..000000000 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/konsorten/go-windows-terminal-sequences diff --git a/vendor/github.com/kubernetes-sigs/application/LICENSE b/vendor/github.com/kubernetes-sigs/application/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/component/component.go b/vendor/github.com/kubernetes-sigs/application/pkg/component/component.go new file mode 100644 index 000000000..03669d019 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/component/component.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Kubernetes Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package component + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "reflect" + "strings" +) + +// Constants defining labels +const ( + LabelCR = "custom-resource" + LabelCRName = "custom-resource-name" + LabelCRNamespace = "custom-resource-namespace" + LabelComponent = "component" +) + +// Labels return +func Labels(cr metav1.Object, component string) map[string]string { + return map[string]string{ + LabelCR: strings.Trim(reflect.TypeOf(cr).String(), "*"), + LabelCRName: cr.GetName(), + LabelCRNamespace: cr.GetNamespace(), + LabelComponent: component, + } +} + +// Labels return the common labels for a resource +func (c *Component) Labels() map[string]string { + return Labels(c.CR, c.Name) +} + +// Merge is used to merge multiple maps into the target map +func (out KVMap) Merge(kvmaps ...KVMap) { + for _, kvmap := range kvmaps { + for k, v := range kvmap { + out[k] = v + } + } +} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/component/doc.go b/vendor/github.com/kubernetes-sigs/application/pkg/component/doc.go new file mode 100644 index 000000000..c32b05d38 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/component/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package component contains component definition +package component diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/component/interface.go b/vendor/github.com/kubernetes-sigs/application/pkg/component/interface.go new file mode 100644 index 000000000..36ad58fae --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/component/interface.go @@ -0,0 +1,31 @@ +/* +Copyright 2018 The Kubernetes Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package component + +import ( + "github.com/kubernetes-sigs/application/pkg/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Handle is an interface for operating on logical Components of a CR +type Handle interface { + DependentResources(rsrc interface{}) *resource.ObjectBag + ExpectedResources(rsrc interface{}, labels map[string]string, dependent, aggregated *resource.ObjectBag) (*resource.ObjectBag, error) + Observables(scheme *runtime.Scheme, rsrc interface{}, labels map[string]string, expected *resource.ObjectBag) []resource.Observable + Mutate(rsrc interface{}, rsrclabels map[string]string, status interface{}, expected, dependent, observed *resource.ObjectBag) (*resource.ObjectBag, error) + Differs(expected metav1.Object, observed metav1.Object) bool + UpdateComponentStatus(rsrc, status interface{}, reconciled *resource.ObjectBag, err error) + Finalize(rsrc, status interface{}, observed *resource.ObjectBag) error +} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/component/types.go b/vendor/github.com/kubernetes-sigs/application/pkg/component/types.go new file mode 100644 index 000000000..d9c50dcb2 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/component/types.go @@ -0,0 +1,35 @@ +/* +Copyright 2018 The Kubernetes Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package component + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Common const definitions +const ( + LifecycleManaged = "managed" + LifecycleReferred = "referred" +) + +// Component represents a logical collection of resources +type Component struct { + Handle + Name string + CR metav1.Object + OwnerRef *metav1.OwnerReference +} + +// KVMap is a map[string]string +type KVMap map[string]string diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/customresource/customresource.go b/vendor/github.com/kubernetes-sigs/application/pkg/customresource/customresource.go new file mode 100644 index 000000000..04de442fe --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/customresource/customresource.go @@ -0,0 +1,31 @@ +/* +Copyright 2018 The Kubernetes Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package customresource + +import ( + component "github.com/kubernetes-sigs/application/pkg/component" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "reflect" + "strings" +) + +// Labels return custom resource label +func (hv *HandleValue) Labels() map[string]string { + c := hv.Handle.(metav1.Object) + return map[string]string{ + component.LabelCR: strings.Trim(reflect.TypeOf(c).String(), "*"), + component.LabelCRName: c.GetName(), + component.LabelCRNamespace: c.GetNamespace(), + } +} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/customresource/doc.go b/vendor/github.com/kubernetes-sigs/application/pkg/customresource/doc.go new file mode 100644 index 000000000..efd748434 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/customresource/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package customresource contains component definition +package customresource diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/customresource/interface.go b/vendor/github.com/kubernetes-sigs/application/pkg/customresource/interface.go new file mode 100644 index 000000000..4f2cc57db --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/customresource/interface.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package customresource + +import ( + "github.com/kubernetes-sigs/application/pkg/component" +) + +// Handle is an interface for operating on CRs +type Handle interface { + ApplyDefaults() + Validate() error + Components() []component.Component + UpdateRsrcStatus(interface{}, error) bool + NewRsrc() Handle + NewStatus() interface{} +} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/customresource/type.go b/vendor/github.com/kubernetes-sigs/application/pkg/customresource/type.go new file mode 100644 index 000000000..9f1a29588 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/customresource/type.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package customresource + +// HandleValue is to add methods for an interface object +type HandleValue struct { + Handle +} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/finalizer/doc.go b/vendor/github.com/kubernetes-sigs/application/pkg/finalizer/doc.go new file mode 100644 index 000000000..ed3c1e46d --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/finalizer/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package finalizer contains storage releated code +// +k8s:deepcopy-gen=package,register +package finalizer diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/finalizer/finalizer.go b/vendor/github.com/kubernetes-sigs/application/pkg/finalizer/finalizer.go new file mode 100644 index 000000000..1e045957f --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/finalizer/finalizer.go @@ -0,0 +1,61 @@ +/* +Copyright 2018 The Kubernetes Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package finalizer + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// const fileds +const ( + Cleanup = "cleanup" +) + +// Add adds the finalizer string +func Add(o metav1.Object, new string) { + exists := false + existing := o.GetFinalizers() + for _, f := range existing { + if f == new { + exists = true + break + } + } + + if !exists { + existing = append(existing, new) + o.SetFinalizers(existing) + } +} + +// Remove removes the finalizer string +func Remove(o metav1.Object, new string) { + foundat := -1 + existing := o.GetFinalizers() + for i, f := range existing { + if f == new { + foundat = i + break + } + } + if foundat != -1 { + existing[foundat] = existing[len(existing)-1] + o.SetFinalizers(existing[:len(existing)-1]) + } +} + +// EnsureStandard adds standard finalizers +func EnsureStandard(o metav1.Object) { + Add(o, Cleanup) +} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/finalizer/zz_generated.deepcopy.go b/vendor/github.com/kubernetes-sigs/application/pkg/finalizer/zz_generated.deepcopy.go new file mode 100644 index 000000000..d7a889f26 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/finalizer/zz_generated.deepcopy.go @@ -0,0 +1,20 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 The Kubernetes Authors. 
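The finalizer helpers above operate on any metav1.Object by editing its finalizers slice in place. A minimal usage sketch, assuming a plain corev1.Pod as the target object (the package itself is object-agnostic):

```go
package main

import (
	"fmt"

	"github.com/kubernetes-sigs/application/pkg/finalizer"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Any type embedding ObjectMeta satisfies metav1.Object.
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}

	// EnsureStandard adds the "cleanup" finalizer; Add is a no-op if it is already present.
	finalizer.EnsureStandard(pod)
	finalizer.Add(pod, finalizer.Cleanup)
	fmt.Println(pod.GetFinalizers()) // [cleanup]

	// Remove swaps the matching entry with the last element and truncates,
	// so ordering of any remaining finalizers is not preserved.
	finalizer.Remove(pod, finalizer.Cleanup)
	fmt.Println(pod.GetFinalizers()) // []
}
```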
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by main. DO NOT EDIT. + +package finalizer diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/resource/doc.go b/vendor/github.com/kubernetes-sigs/application/pkg/resource/doc.go new file mode 100644 index 000000000..8243c528e --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/resource/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resource contains resource methods +package resource diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/resource/resource.go b/vendor/github.com/kubernetes-sigs/application/pkg/resource/resource.go new file mode 100644 index 000000000..5db16ace3 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/resource/resource.go @@ -0,0 +1,183 @@ +/* +Copyright 2018 The Kubernetes Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "bufio" + "bytes" + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/kubernetes/scheme" + "os" + "reflect" + "strings" + "text/template" +) + +// objFromReader reads Object from []byte spec +func objFromReader(b *bufio.Reader, data interface{}, list metav1.ListInterface) (*Object, error) { + var exdoc bytes.Buffer + r := yaml.NewYAMLReader(b) + doc, err := r.Read() + if err == nil { + tmpl, e := template.New("tmpl").Parse(string(doc)) + err = e + if err == nil { + err = tmpl.Execute(&exdoc, data) + if err == nil { + d := scheme.Codecs.UniversalDeserializer() + obj, _, e := d.Decode(exdoc.Bytes(), nil, nil) + err = e + if err == nil { + return &Object{ + Obj: obj.DeepCopyObject().(metav1.Object), + ObjList: list, + Lifecycle: LifecycleManaged, + }, nil + } + } + } + } + return nil, err +} + +// ObjFromString populates Object from string spec +func ObjFromString(spec string, values interface{}, list metav1.ListInterface) (*Object, error) { + return objFromReader(bufio.NewReader(strings.NewReader(spec)), values, list) +} + +// ObjFromFile populates Object from file +func ObjFromFile(path string, values interface{}, list metav1.ListInterface) (*Object, error) { + f, err := os.Open(path) + if err == nil { + return objFromReader(bufio.NewReader(f), values, list) + } + return nil, err +} + +// ObservablesFromObjects returns ObservablesFromObjects +func ObservablesFromObjects(scheme *runtime.Scheme, bag *ObjectBag, labels map[string]string) []Observable { + var gk schema.GroupKind + var observables []Observable + gkmap := map[schema.GroupKind]struct{}{} + for _, obj := range bag.Items() { + if obj.ObjList != nil { + ro := obj.Obj.(runtime.Object) + kinds, _, err := scheme.ObjectKinds(ro) + if err == nil { + // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. + if len(kinds) != 1 { + err = fmt.Errorf("Expected exactly 1 kind for Object %T, but found %s kinds", ro, kinds) + + } + } + // Cache the Group and Kind for the OwnerType + if err == nil { + gk = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind} + } else { + gk = ro.GetObjectKind().GroupVersionKind().GroupKind() + } + if _, ok := gkmap[gk]; !ok { + gkmap[gk] = struct{}{} + observable := Observable{ + ObjList: obj.ObjList, + Labels: labels, + } + observables = append(observables, observable) + } + } else { + observable := Observable{ + Obj: obj.Obj, + } + observables = append(observables, observable) + } + } + return observables +} + +// ReferredObject returns a reffered object +func ReferredObject(obj metav1.Object, name, namespace string) Object { + obj.SetName(name) + obj.SetNamespace(namespace) + return Object{ + Lifecycle: LifecycleReferred, + Obj: obj, + } +} + +// Add adds to the Object bag +func (b *ObjectBag) Add(objs ...Object) { + b.objects = append(b.objects, objs...) 
+} + +// Items get items from the Object bag +func (b *ObjectBag) Items() []Object { + return b.objects +} + +// Objs get items from the Object bag +func (b *ObjectBag) Objs() []metav1.Object { + var objs []metav1.Object + for _, o := range b.Items() { + objs = append(objs, o.Obj) + } + return objs +} + +// Get returns an item which matched the kind and name +func (b *ObjectBag) Get(inobj metav1.Object, name, namespace string) metav1.Object { + inobj.SetName(name) + inobj.SetNamespace(namespace) + for _, obj := range b.Items() { + otype := reflect.TypeOf(obj.Obj).String() + intype := reflect.TypeOf(inobj).String() + if otype == intype && obj.Obj.GetName() == inobj.GetName() && obj.Obj.GetNamespace() == inobj.GetNamespace() { + return obj.Obj + } + } + return nil +} + +// Delete returns an item which matched the kind and name +func (b *ObjectBag) Delete(inobj metav1.Object) { + for i, obj := range b.objects { + otype := reflect.TypeOf(obj.Obj).String() + intype := reflect.TypeOf(inobj).String() + if otype == intype && obj.Obj.GetName() == inobj.GetName() && obj.Obj.GetNamespace() == inobj.GetNamespace() { + b.objects[i] = b.objects[len(b.objects)-1] + b.objects = b.objects[:len(b.objects)-1] + break + } + } +} + +// Validate validates the LocalObjectReference +func (s *LocalObjectReference) Validate(fp *field.Path, sfield string, errs field.ErrorList, required bool) field.ErrorList { + fp = fp.Child(sfield) + if s == nil { + if required { + errs = append(errs, field.Required(fp, "Required "+sfield+" missing")) + } + return errs + } + + if s.Name == "" { + errs = append(errs, field.Required(fp.Child("name"), "name is required")) + } + return errs +} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/resource/types.go b/vendor/github.com/kubernetes-sigs/application/pkg/resource/types.go new file mode 100644 index 000000000..7fbb9a3c6 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/resource/types.go @@ -0,0 +1,61 @@ +/* +Copyright 2018 The Kubernetes Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Common const definitions +const ( + LifecycleManaged = "managed" + LifecycleReferred = "referred" +) + +// ObjectBag abstracts dealing with group of objects +// For now it is a simple list +type ObjectBag struct { + objects []Object +} + +// Object is a container to capture the k8s resource info to be used by controller +type Object struct { + // Lifecycle can be: managed, reference + Lifecycle string + // Obj refers to the resource object can be: sts, service, secret, pvc, .. + Obj metav1.Object + // ObjList refers to the list of resource objects + ObjList metav1.ListInterface +} + +// Observable captures the k8s resource info and selector to fetch child resources +type Observable struct { + // ObjList refers to the list of resource objects + ObjList metav1.ListInterface + // Obj refers to the resource object can be: sts, service, secret, pvc, .. 
+ Obj metav1.Object + // Labels list of labels + Labels map[string]string + // Typemeta - needed for go test fake client + Type metav1.TypeMeta +} + +// LocalObjectReference with validation +type LocalObjectReference struct { + corev1.LocalObjectReference `json:",inline"` +} + +// GetObjectFn is a type for any function that returns resource info +type GetObjectFn func(interface{}) (*Object, error) diff --git a/vendor/github.com/lucas-clemente/aes12/Readme.md b/vendor/github.com/lucas-clemente/aes12/Readme.md deleted file mode 100644 index ef4e316f5..000000000 --- a/vendor/github.com/lucas-clemente/aes12/Readme.md +++ /dev/null @@ -1,28 +0,0 @@ -# aes12 - -This package modifies the AES-GCM implementation from Go's standard library to use 12 byte tag sizes. It is not intended for a general audience, and used in [quic-go](https://github.com/lucas-clemente/quic-go). - -To make use of the in-place encryption / decryption feature, the `dst` parameter to `Seal` and `Open` should be 16 bytes longer than plaintext, not 12. - -Command for testing: - -``` -go test . --bench=. && GOARCH=386 go test . --bench=. -``` - -The output (on my machine): - -``` -BenchmarkAESGCMSeal1K-8 3000000 467 ns/op 2192.37 MB/s -BenchmarkAESGCMOpen1K-8 3000000 416 ns/op 2456.72 MB/s -BenchmarkAESGCMSeal8K-8 500000 2742 ns/op 2986.53 MB/s -BenchmarkAESGCMOpen8K-8 500000 2791 ns/op 2934.65 MB/s -PASS -ok github.com/lucas-clemente/aes12 6.383s -BenchmarkAESGCMSeal1K-8 50000 35233 ns/op 29.06 MB/s -BenchmarkAESGCMOpen1K-8 50000 34529 ns/op 29.66 MB/s -BenchmarkAESGCMSeal8K-8 5000 262678 ns/op 31.19 MB/s -BenchmarkAESGCMOpen8K-8 5000 267296 ns/op 30.65 MB/s -PASS -ok github.com/lucas-clemente/aes12 6.972s -``` diff --git a/vendor/github.com/lucas-clemente/quic-go-certificates/README.md b/vendor/github.com/lucas-clemente/quic-go-certificates/README.md deleted file mode 100644 index c9621d5cd..000000000 --- a/vendor/github.com/lucas-clemente/quic-go-certificates/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# certsets - -Common certificate sets for quic-go diff --git a/vendor/github.com/lucas-clemente/quic-go-certificates/createCertSets.rb b/vendor/github.com/lucas-clemente/quic-go-certificates/createCertSets.rb deleted file mode 100755 index 6d28e83cf..000000000 --- a/vendor/github.com/lucas-clemente/quic-go-certificates/createCertSets.rb +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env ruby -# -# Extract the common certificate sets from the chromium source to go -# -# Usage: -# createCertSets.rb 1 ~/src/chromium/src/net/quic/crypto/common_cert_set_1* -# createCertSets.rb 2 ~/src/chromium/src/net/quic/crypto/common_cert_set_2* - -n = ARGV.shift -mainFile = ARGV.shift -dataFiles = ARGV - -data = "package certsets\n" -data += File.read(mainFile) -data += (dataFiles.map{|p| File.read(p)}).join - -# Good enough -data.gsub!(/\/\*(.*?)\*\//m, '') -data.gsub!(/^#include.+/, '') -data.gsub!(/^#if 0(.*?)\n#endif/m, '') - -data.gsub!(/^static const size_t kNumCerts.+/, '') -data.gsub!(/static const size_t kLens[^}]+};/m, '') - -data.gsub!('static const unsigned char* const kCerts[] = {', "var CertSet#{n} = [][]byte{") -data.gsub!('static const uint64_t kHash = UINT64_C', "const CertSet#{n}Hash uint64 = ") - -data.gsub!(/static const unsigned char kDERCert(\d+)\[\] = /, "var kDERCert\\1 = []byte") - -data.gsub!(/kDERCert(\d+)/, "certSet#{n}Cert\\1") - -File.write("cert_set_#{n}.go", data) - -system("gofmt -w -s cert_set_#{n}.go") diff --git a/vendor/github.com/lucas-clemente/quic-go/.editorconfig 
b/vendor/github.com/lucas-clemente/quic-go/.editorconfig deleted file mode 100644 index 538ba2b24..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/.editorconfig +++ /dev/null @@ -1,5 +0,0 @@ -root = true - -[*] -indent_style = tab -indent_size = 2 diff --git a/vendor/github.com/lucas-clemente/quic-go/.gitignore b/vendor/github.com/lucas-clemente/quic-go/.gitignore deleted file mode 100644 index 040b55ac0..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -debug -debug.test -main diff --git a/vendor/github.com/lucas-clemente/quic-go/.gometalinter.json b/vendor/github.com/lucas-clemente/quic-go/.gometalinter.json deleted file mode 100644 index 7aa4fb492..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/.gometalinter.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "DisableAll": true, - "Exclude": [ - "vendor", - "streams_map_incoming_generic.go", - "streams_map_outgoing_generic.go" - ], - "Enable": [ - "deadcode", - "goimports", - "ineffassign", - "megacheck", - "misspell", - "structcheck", - "unconvert", - "varcheck", - "vet" - ], - "Linters": { - "vet": "go tool vet -printfuncs=Infof,Debugf,Warningf,Errorf:PATH:LINE:MESSAGE", - "misspell": "misspell -i ect:PATH:LINE:COL:MESSAGE", - "megacheck": "megacheck -ignore github.com/lucas-clemente/quic-go/h2quic/response_writer_closenotifier.go:SA1019:PATH:LINE:COL:MESSAGE" - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/.travis.yml b/vendor/github.com/lucas-clemente/quic-go/.travis.yml deleted file mode 100644 index 059be883b..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/.travis.yml +++ /dev/null @@ -1,52 +0,0 @@ -dist: trusty -group: travis_latest - -addons: - hosts: - - quic.clemente.io - -language: go - -go: - - "1.10.4" - - "1.11" - -# first part of the GOARCH workaround -# setting the GOARCH directly doesn't work, since the value will be overwritten later -# so set it to a temporary environment variable first -env: - global: - - TIMESCALE_FACTOR=20 - matrix: - - TRAVIS_GOARCH=amd64 TESTMODE=lint - - TRAVIS_GOARCH=amd64 TESTMODE=unit - - TRAVIS_GOARCH=amd64 TESTMODE=integration - - TRAVIS_GOARCH=386 TESTMODE=unit - - TRAVIS_GOARCH=386 TESTMODE=integration - -# Linters might work differently in different Go versions. -# Only run them in the most recent one. -matrix: - exclude: - - go: "1.10.4" - env: TRAVIS_GOARCH=amd64 TESTMODE=lint - - go: "1.10.4" - env: TRAVIS_GOARCH=386 TESTMODE=lint - -# second part of the GOARCH workaround -# now actually set the GOARCH env variable to the value of the temporary variable set earlier -before_install: - - go get golang.org/x/tools/cmd/cover - - go get github.com/onsi/ginkgo/ginkgo - - go get github.com/onsi/gomega - - export GOARCH=$TRAVIS_GOARCH - - go env # for debugging - - "printf \"quic.clemente.io certificate valid until: \" && openssl x509 -in example/fullchain.pem -enddate -noout | cut -d = -f 2" - - "export DISPLAY=:99.0" - - "Xvfb $DISPLAY &> /dev/null &" - -script: - - .travis/script.sh - -after_success: - - .travis/after_success.sh diff --git a/vendor/github.com/lucas-clemente/quic-go/Changelog.md b/vendor/github.com/lucas-clemente/quic-go/Changelog.md deleted file mode 100644 index cbc88e73e..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/Changelog.md +++ /dev/null @@ -1,44 +0,0 @@ -# Changelog - -## v0.10.0 (2018-08-28) - -- Add support for QUIC 44, drop support for QUIC 42. - -## v0.9.0 (2018-08-15) - -- Add a `quic.Config` option for the length of the connection ID (for IETF QUIC). 
-- Split Session.Close into one method for regular closing and one for closing with an error. - -## v0.8.0 (2018-06-26) - -- Add support for unidirectional streams (for IETF QUIC). -- Add a `quic.Config` option for the maximum number of incoming streams. -- Add support for QUIC 42 and 43. -- Add dial functions that use a context. -- Multiplex clients on a net.PacketConn, when using Dial(conn). - -## v0.7.0 (2018-02-03) - -- The lower boundary for packets included in ACKs is now derived, and the value sent in STOP_WAITING frames is ignored. -- Remove `DialNonFWSecure` and `DialAddrNonFWSecure`. -- Expose the `ConnectionState` in the `Session` (experimental API). -- Implement packet pacing. - -## v0.6.0 (2017-12-12) - -- Add support for QUIC 39, drop support for QUIC 35 - 37 -- Added `quic.Config` options for maximal flow control windows -- Add a `quic.Config` option for QUIC versions -- Add a `quic.Config` option to request omission of the connection ID from a server -- Add a `quic.Config` option to configure the source address validation -- Add a `quic.Config` option to configure the handshake timeout -- Add a `quic.Config` option to configure the idle timeout -- Add a `quic.Config` option to configure keep-alive -- Rename the STK to Cookie -- Implement `net.Conn`-style deadlines for streams -- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/lucas-clemente/quic-go) for details. -- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/lucas-clemente/quic-go/wiki/Logging) for more details. -- Rename the `h2quic.QuicRoundTripper` to `h2quic.RoundTripper` -- Changed `h2quic.Server.Serve()` to accept a `net.PacketConn` -- Drop support for Go 1.7 and 1.8. -- Various bugfixes diff --git a/vendor/github.com/lucas-clemente/quic-go/README.md b/vendor/github.com/lucas-clemente/quic-go/README.md deleted file mode 100644 index 58d65bd33..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# A QUIC implementation in pure Go - - - -[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/lucas-clemente/quic-go) -[![Travis Build Status](https://img.shields.io/travis/lucas-clemente/quic-go/master.svg?style=flat-square&label=Travis+build)](https://travis-ci.org/lucas-clemente/quic-go) -[![CircleCI Build Status](https://img.shields.io/circleci/project/github/lucas-clemente/quic-go.svg?style=flat-square&label=CircleCI+build)](https://circleci.com/gh/lucas-clemente/quic-go) -[![Windows Build Status](https://img.shields.io/appveyor/ci/lucas-clemente/quic-go/master.svg?style=flat-square&label=windows+build)](https://ci.appveyor.com/project/lucas-clemente/quic-go/branch/master) -[![Code Coverage](https://img.shields.io/codecov/c/github/lucas-clemente/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/lucas-clemente/quic-go/) - -quic-go is an implementation of the [QUIC](https://en.wikipedia.org/wiki/QUIC) protocol in Go. - -## Roadmap - -quic-go is compatible with the current version(s) of Google Chrome and QUIC as deployed on Google's servers. We're actively tracking the development of the Chrome code to ensure compatibility as the protocol evolves. In that process, we're dropping support for old QUIC versions. 
-As Google's QUIC versions are expected to converge towards the [IETF QUIC draft](https://github.com/quicwg/base-drafts), quic-go will eventually implement that draft. - -## Guides - -We currently support Go 1.9+. - -Installing and updating dependencies: - - go get -t -u ./... - -Running tests: - - go test ./... - -### Running the example server - - go run example/main.go -www /var/www/ - -Using the `quic_client` from chromium: - - quic_client --host=127.0.0.1 --port=6121 --v=1 https://quic.clemente.io - -Using Chrome: - - /Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --user-data-dir=/tmp/chrome --no-proxy-server --enable-quic --origin-to-force-quic-on=quic.clemente.io:443 --host-resolver-rules='MAP quic.clemente.io:443 127.0.0.1:6121' https://quic.clemente.io - -### QUIC without HTTP/2 - -Take a look at [this echo example](example/echo/echo.go). - -### Using the example client - - go run example/client/main.go https://clemente.io - -## Usage - -### As a server - -See the [example server](example/main.go) or try out [Caddy](https://github.com/mholt/caddy) (from version 0.9, [instructions here](https://github.com/mholt/caddy/wiki/QUIC)). Starting a QUIC server is very similar to the standard lib http in go: - -```go -http.Handle("/", http.FileServer(http.Dir(wwwDir))) -h2quic.ListenAndServeQUIC("localhost:4242", "/path/to/cert/chain.pem", "/path/to/privkey.pem", nil) -``` - -### As a client - -See the [example client](example/client/main.go). Use a `h2quic.RoundTripper` as a `Transport` in a `http.Client`. - -```go -http.Client{ - Transport: &h2quic.RoundTripper{}, -} -``` - -## Contributing - -We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/lucas-clemente/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment. diff --git a/vendor/github.com/lucas-clemente/quic-go/appveyor.yml b/vendor/github.com/lucas-clemente/quic-go/appveyor.yml deleted file mode 100644 index b9fe526f5..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/appveyor.yml +++ /dev/null @@ -1,37 +0,0 @@ -version: "{build}" - -os: Windows Server 2012 R2 - -environment: - GOPATH: c:\gopath - CGO_ENABLED: 0 - TIMESCALE_FACTOR: 20 - matrix: - - GOARCH: 386 - - GOARCH: amd64 - -hosts: - quic.clemente.io: 127.0.0.1 - -clone_folder: c:\gopath\src\github.com\lucas-clemente\quic-go - -install: - - rmdir c:\go /s /q - - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.windows-amd64.zip - - 7z x go1.11.windows-amd64.zip -y -oC:\ > NUL - - set PATH=%PATH%;%GOPATH%\bin\windows_%GOARCH%;%GOPATH%\bin - - echo %PATH% - - echo %GOPATH% - - go get github.com/onsi/ginkgo/ginkgo - - go get github.com/onsi/gomega - - go version - - go env - - go get -v -t ./... 
- -build_script: - - ginkgo -r -v -randomizeAllSpecs -randomizeSuites -trace -skipPackage benchmark,integrationtests - - ginkgo -randomizeAllSpecs -randomizeSuites -trace benchmark -- -samples=1 - -test: off - -deploy: off diff --git a/vendor/github.com/lucas-clemente/quic-go/codecov.yml b/vendor/github.com/lucas-clemente/quic-go/codecov.yml deleted file mode 100644 index f077c1ad0..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/codecov.yml +++ /dev/null @@ -1,18 +0,0 @@ -coverage: - round: nearest - ignore: - - streams_map_incoming_bidi.go - - streams_map_incoming_uni.go - - streams_map_outgoing_bidi.go - - streams_map_outgoing_uni.go - - h2quic/gzipreader.go - - h2quic/response.go - - internal/ackhandler/packet_linkedlist.go - - internal/utils/byteinterval_linkedlist.go - - internal/utils/packetinterval_linkedlist.go - - internal/utils/linkedlist/linkedlist.go - status: - project: - default: - threshold: 0.5 - patch: false diff --git a/vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh b/vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh deleted file mode 100755 index 0ba5f64ef..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Mockgen refuses to generate mocks private types. -# This script copies the quic package to a temporary directory, and adds an public alias for the private type. -# It then creates a mock for this public (alias) type. - -TEMP_DIR=$(mktemp -d) -mkdir -p $TEMP_DIR/src/github.com/lucas-clemente/quic-go/ - -# uppercase the name of the interface -INTERFACE_NAME="$(tr '[:lower:]' '[:upper:]' <<< ${4:0:1})${4:1}" - -# copy all .go files to a temporary directory -rsync -r --exclude 'vendor' --include='*.go' --include '*/' --exclude '*' $GOPATH/src/github.com/lucas-clemente/quic-go/ $TEMP_DIR/src/github.com/lucas-clemente/quic-go/ - -# create a public alias for the interface, so that mockgen can process it -echo -e "package $1\n" > $TEMP_DIR/src/github.com/lucas-clemente/quic-go/mockgen_interface.go -echo "type $INTERFACE_NAME = $4" >> $TEMP_DIR/src/github.com/lucas-clemente/quic-go/mockgen_interface.go - -export GOPATH="$TEMP_DIR:$GOPATH" - -mockgen -package $1 -self_package $1 -destination $2 $3 $INTERFACE_NAME - -# mockgen imports quic-go as 'import quic_go github.com/lucas_clemente/quic-go' -sed -i '' 's/quic_go.//g' $2 -goimports -w $2 - -rm -r "$TEMP_DIR" diff --git a/vendor/github.com/markbates/inflect/LICENCE b/vendor/github.com/markbates/inflect/LICENCE new file mode 100644 index 000000000..8a36b944a --- /dev/null +++ b/vendor/github.com/markbates/inflect/LICENCE @@ -0,0 +1,7 @@ +Copyright (c) 2011 Chris Farmiloe + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/markbates/inflect/helpers.go b/vendor/github.com/markbates/inflect/helpers.go new file mode 100644 index 000000000..24050c70a --- /dev/null +++ b/vendor/github.com/markbates/inflect/helpers.go @@ -0,0 +1,19 @@ +package inflect + +//Helpers is a map of the helper names with its corresponding inflect function +var Helpers = map[string]interface{}{ + "asciffy": Asciify, + "camelize": Camelize, + "camelize_down_first": CamelizeDownFirst, + "capitalize": Capitalize, + "dasherize": Dasherize, + "humanize": Humanize, + "ordinalize": Ordinalize, + "parameterize": Parameterize, + "pluralize": Pluralize, + "pluralize_with_size": PluralizeWithSize, + "singularize": Singularize, + "tableize": Tableize, + "typeify": Typeify, + "underscore": Underscore, +} diff --git a/vendor/github.com/markbates/inflect/inflect.go b/vendor/github.com/markbates/inflect/inflect.go new file mode 100644 index 000000000..9b6776c19 --- /dev/null +++ b/vendor/github.com/markbates/inflect/inflect.go @@ -0,0 +1,892 @@ +package inflect + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// baseAcronyms comes from https://en.wikipedia.org/wiki/List_of_information_technology_acronymss +const baseAcronyms = `JSON,JWT,ID,UUID,SQL,ACK,ACL,ADSL,AES,ANSI,API,ARP,ATM,BGP,BSS,CAT,CCITT,CHAP,CIDR,CIR,CLI,CPE,CPU,CRC,CRT,CSMA,CMOS,DCE,DEC,DES,DHCP,DNS,DRAM,DSL,DSLAM,DTE,DMI,EHA,EIA,EIGRP,EOF,ESS,FCC,FCS,FDDI,FTP,GBIC,gbps,GEPOF,HDLC,HTTP,HTTPS,IANA,ICMP,IDF,IDS,IEEE,IETF,IMAP,IP,IPS,ISDN,ISP,kbps,LACP,LAN,LAPB,LAPF,LLC,MAC,MAN,Mbps,MC,MDF,MIB,MoCA,MPLS,MTU,NAC,NAT,NBMA,NIC,NRZ,NRZI,NVRAM,OSI,OSPF,OUI,PAP,PAT,PC,PIM,PIM,PCM,PDU,POP3,POP,POTS,PPP,PPTP,PTT,PVST,RADIUS,RAM,RARP,RFC,RIP,RLL,ROM,RSTP,RTP,RCP,SDLC,SFD,SFP,SLARP,SLIP,SMTP,SNA,SNAP,SNMP,SOF,SRAM,SSH,SSID,STP,SYN,TDM,TFTP,TIA,TOFU,UDP,URL,URI,USB,UTP,VC,VLAN,VLSM,VPN,W3C,WAN,WEP,WiFi,WPA,WWW` + +// Rule used by rulesets +type Rule struct { + suffix string + replacement string + exact bool +} + +// Ruleset a Ruleset is the config of pluralization rules +// you can extend the rules with the Add* methods +type Ruleset struct { + uncountables map[string]bool + plurals []*Rule + singulars []*Rule + humans []*Rule + acronyms []*Rule +} + +// NewRuleset creates a blank ruleset. 
Unless you are going to +// build your own rules from scratch you probably +// won't need this and can just use the defaultRuleset +// via the global inflect.* methods +func NewRuleset() *Ruleset { + rs := new(Ruleset) + rs.uncountables = make(map[string]bool) + rs.plurals = make([]*Rule, 0) + rs.singulars = make([]*Rule, 0) + rs.humans = make([]*Rule, 0) + rs.acronyms = make([]*Rule, 0) + return rs +} + +// NewDefaultRuleset creates a new ruleset and load it with the default +// set of common English pluralization rules +func NewDefaultRuleset() *Ruleset { + rs := NewRuleset() + rs.AddPlural("movie", "movies") + rs.AddPlural("s", "s") + rs.AddPlural("testis", "testes") + rs.AddPlural("axis", "axes") + rs.AddPlural("octopus", "octopi") + rs.AddPlural("virus", "viri") + rs.AddPlural("octopi", "octopi") + rs.AddPlural("viri", "viri") + rs.AddPlural("alias", "aliases") + rs.AddPlural("status", "statuses") + rs.AddPlural("Status", "Statuses") + rs.AddPlural("campus", "campuses") + rs.AddPlural("bus", "buses") + rs.AddPlural("buffalo", "buffaloes") + rs.AddPlural("tomato", "tomatoes") + rs.AddPlural("tum", "ta") + rs.AddPlural("ium", "ia") + rs.AddPlural("ta", "ta") + rs.AddPlural("ia", "ia") + rs.AddPlural("sis", "ses") + rs.AddPlural("lf", "lves") + rs.AddPlural("rf", "rves") + rs.AddPlural("afe", "aves") + rs.AddPlural("bfe", "bves") + rs.AddPlural("cfe", "cves") + rs.AddPlural("dfe", "dves") + rs.AddPlural("efe", "eves") + rs.AddPlural("gfe", "gves") + rs.AddPlural("hfe", "hves") + rs.AddPlural("ife", "ives") + rs.AddPlural("jfe", "jves") + rs.AddPlural("kfe", "kves") + rs.AddPlural("lfe", "lves") + rs.AddPlural("mfe", "mves") + rs.AddPlural("nfe", "nves") + rs.AddPlural("ofe", "oves") + rs.AddPlural("pfe", "pves") + rs.AddPlural("qfe", "qves") + rs.AddPlural("rfe", "rves") + rs.AddPlural("sfe", "sves") + rs.AddPlural("tfe", "tves") + rs.AddPlural("ufe", "uves") + rs.AddPlural("vfe", "vves") + rs.AddPlural("wfe", "wves") + rs.AddPlural("xfe", "xves") + rs.AddPlural("yfe", "yves") + rs.AddPlural("zfe", "zves") + rs.AddPlural("hive", "hives") + rs.AddPlural("quy", "quies") + rs.AddPlural("by", "bies") + rs.AddPlural("cy", "cies") + rs.AddPlural("dy", "dies") + rs.AddPlural("fy", "fies") + rs.AddPlural("gy", "gies") + rs.AddPlural("hy", "hies") + rs.AddPlural("jy", "jies") + rs.AddPlural("ky", "kies") + rs.AddPlural("ly", "lies") + rs.AddPlural("my", "mies") + rs.AddPlural("ny", "nies") + rs.AddPlural("py", "pies") + rs.AddPlural("qy", "qies") + rs.AddPlural("ry", "ries") + rs.AddPlural("sy", "sies") + rs.AddPlural("ty", "ties") + rs.AddPlural("vy", "vies") + rs.AddPlural("wy", "wies") + rs.AddPlural("xy", "xies") + rs.AddPlural("zy", "zies") + rs.AddPlural("x", "xes") + rs.AddPlural("ch", "ches") + rs.AddPlural("ss", "sses") + rs.AddPlural("sh", "shes") + rs.AddPlural("matrix", "matrices") + rs.AddPlural("vertix", "vertices") + rs.AddPlural("indix", "indices") + rs.AddPlural("matrex", "matrices") + rs.AddPlural("vertex", "vertices") + rs.AddPlural("index", "indices") + rs.AddPlural("mouse", "mice") + rs.AddPlural("louse", "lice") + rs.AddPlural("mice", "mice") + rs.AddPlural("lice", "lice") + rs.AddPlural("ress", "resses") + rs.AddPluralExact("ox", "oxen", true) + rs.AddPluralExact("oxen", "oxen", true) + rs.AddPluralExact("quiz", "quizzes", true) + rs.AddSingular("s", "") + rs.AddSingular("ss", "ss") + rs.AddSingular("news", "news") + rs.AddSingular("ta", "tum") + rs.AddSingular("ia", "ium") + rs.AddSingular("analyses", "analysis") + rs.AddSingular("bases", "basis") + 
rs.AddSingularExact("basis", "basis", true) + rs.AddSingular("diagnoses", "diagnosis") + rs.AddSingularExact("diagnosis", "diagnosis", true) + rs.AddSingular("parentheses", "parenthesis") + rs.AddSingular("prognoses", "prognosis") + rs.AddSingular("synopses", "synopsis") + rs.AddSingular("theses", "thesis") + rs.AddSingular("analyses", "analysis") + rs.AddSingularExact("analysis", "analysis", true) + rs.AddSingular("ovies", "ovie") + rs.AddSingular("aves", "afe") + rs.AddSingular("bves", "bfe") + rs.AddSingular("cves", "cfe") + rs.AddSingular("dves", "dfe") + rs.AddSingular("eves", "efe") + rs.AddSingular("gves", "gfe") + rs.AddSingular("hves", "hfe") + rs.AddSingular("ives", "ife") + rs.AddSingular("jves", "jfe") + rs.AddSingular("kves", "kfe") + rs.AddSingular("lves", "lfe") + rs.AddSingular("mves", "mfe") + rs.AddSingular("nves", "nfe") + rs.AddSingular("oves", "ofe") + rs.AddSingular("pves", "pfe") + rs.AddSingular("qves", "qfe") + rs.AddSingular("rves", "rfe") + rs.AddSingular("sves", "sfe") + rs.AddSingular("tves", "tfe") + rs.AddSingular("uves", "ufe") + rs.AddSingular("vves", "vfe") + rs.AddSingular("wves", "wfe") + rs.AddSingular("xves", "xfe") + rs.AddSingular("yves", "yfe") + rs.AddSingular("zves", "zfe") + rs.AddSingular("hives", "hive") + rs.AddSingular("tives", "tive") + rs.AddSingular("lves", "lf") + rs.AddSingular("rves", "rf") + rs.AddSingular("quies", "quy") + rs.AddSingular("bies", "by") + rs.AddSingular("cies", "cy") + rs.AddSingular("dies", "dy") + rs.AddSingular("fies", "fy") + rs.AddSingular("gies", "gy") + rs.AddSingular("hies", "hy") + rs.AddSingular("jies", "jy") + rs.AddSingular("kies", "ky") + rs.AddSingular("lies", "ly") + rs.AddSingular("mies", "my") + rs.AddSingular("nies", "ny") + rs.AddSingular("pies", "py") + rs.AddSingular("qies", "qy") + rs.AddSingular("ries", "ry") + rs.AddSingular("sies", "sy") + rs.AddSingular("ties", "ty") + // rs.AddSingular("vies", "vy") + rs.AddSingular("wies", "wy") + rs.AddSingular("xies", "xy") + rs.AddSingular("zies", "zy") + rs.AddSingular("series", "series") + rs.AddSingular("xes", "x") + rs.AddSingular("ches", "ch") + rs.AddSingular("sses", "ss") + rs.AddSingular("shes", "sh") + rs.AddSingular("mice", "mouse") + rs.AddSingular("lice", "louse") + rs.AddSingular("buses", "bus") + rs.AddSingularExact("bus", "bus", true) + rs.AddSingular("oes", "o") + rs.AddSingular("shoes", "shoe") + rs.AddSingular("crises", "crisis") + rs.AddSingularExact("crisis", "crisis", true) + rs.AddSingular("axes", "axis") + rs.AddSingularExact("axis", "axis", true) + rs.AddSingular("testes", "testis") + rs.AddSingularExact("testis", "testis", true) + rs.AddSingular("octopi", "octopus") + rs.AddSingularExact("octopus", "octopus", true) + rs.AddSingular("viri", "virus") + rs.AddSingularExact("virus", "virus", true) + rs.AddSingular("statuses", "status") + rs.AddSingular("Statuses", "Status") + rs.AddSingular("campuses", "campus") + rs.AddSingularExact("status", "status", true) + rs.AddSingularExact("Status", "Status", true) + rs.AddSingularExact("campus", "campus", true) + rs.AddSingular("aliases", "alias") + rs.AddSingularExact("alias", "alias", true) + rs.AddSingularExact("oxen", "ox", true) + rs.AddSingular("vertices", "vertex") + rs.AddSingular("indices", "index") + rs.AddSingular("matrices", "matrix") + rs.AddSingularExact("quizzes", "quiz", true) + rs.AddSingular("databases", "database") + rs.AddSingular("resses", "ress") + rs.AddSingular("ress", "ress") + rs.AddIrregular("person", "people") + rs.AddIrregular("man", "men") + 
rs.AddIrregular("child", "children") + rs.AddIrregular("sex", "sexes") + rs.AddIrregular("move", "moves") + rs.AddIrregular("zombie", "zombies") + rs.AddIrregular("Status", "Statuses") + rs.AddIrregular("status", "statuses") + rs.AddIrregular("campus", "campuses") + rs.AddIrregular("human", "humans") + rs.AddUncountable("equipment") + rs.AddUncountable("information") + rs.AddUncountable("rice") + rs.AddUncountable("money") + rs.AddUncountable("species") + rs.AddUncountable("series") + rs.AddUncountable("fish") + rs.AddUncountable("sheep") + rs.AddUncountable("jeans") + rs.AddUncountable("police") + + acronyms := strings.Split(baseAcronyms, ",") + for _, acr := range acronyms { + rs.AddAcronym(acr) + } + + return rs +} + +// Uncountables returns a map of uncountables in the ruleset +func (rs *Ruleset) Uncountables() map[string]bool { + return rs.uncountables +} + +// AddPlural add a pluralization rule +func (rs *Ruleset) AddPlural(suffix, replacement string) { + rs.AddPluralExact(suffix, replacement, false) +} + +// AddPluralExact add a pluralization rule with full string match +func (rs *Ruleset) AddPluralExact(suffix, replacement string, exact bool) { + // remove uncountable + delete(rs.uncountables, suffix) + // create rule + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + r.exact = exact + // prepend + rs.plurals = append([]*Rule{r}, rs.plurals...) +} + +// AddSingular add a singular rule +func (rs *Ruleset) AddSingular(suffix, replacement string) { + rs.AddSingularExact(suffix, replacement, false) +} + +// AddSingularExact same as AddSingular but you can set `exact` to force +// a full string match +func (rs *Ruleset) AddSingularExact(suffix, replacement string, exact bool) { + // remove from uncountable + delete(rs.uncountables, suffix) + // create rule + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + r.exact = exact + rs.singulars = append([]*Rule{r}, rs.singulars...) +} + +// AddHuman Human rules are applied by humanize to show more friendly +// versions of words +func (rs *Ruleset) AddHuman(suffix, replacement string) { + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + rs.humans = append([]*Rule{r}, rs.humans...) +} + +// AddIrregular Add any inconsistent pluralizing/singularizing rules +// to the set here. +func (rs *Ruleset) AddIrregular(singular, plural string) { + delete(rs.uncountables, singular) + delete(rs.uncountables, plural) + rs.AddPlural(singular, plural) + rs.AddPlural(plural, plural) + rs.AddSingular(plural, singular) +} + +// AddAcronym if you use acronym you may need to add them to the ruleset +// to prevent Underscored words of things like "HTML" coming out +// as "h_t_m_l" +func (rs *Ruleset) AddAcronym(word string) { + r := new(Rule) + r.suffix = word + r.replacement = rs.Titleize(strings.ToLower(word)) + rs.acronyms = append(rs.acronyms, r) +} + +// AddUncountable add a word to this ruleset that has the same singular and plural form +// for example: "rice" +func (rs *Ruleset) AddUncountable(word string) { + rs.uncountables[strings.ToLower(word)] = true +} + +func (rs *Ruleset) isUncountable(word string) bool { + // handle multiple words by using the last one + words := strings.Split(word, " ") + if _, exists := rs.uncountables[strings.ToLower(words[len(words)-1])]; exists { + return true + } + return false +} + +//isAcronym returns if a word is acronym or not. 
+func (rs *Ruleset) isAcronym(word string) bool { + for _, rule := range rs.acronyms { + if strings.ToUpper(rule.suffix) == strings.ToUpper(word) { + return true + } + } + + return false +} + +//PluralizeWithSize pluralize with taking number into account +func (rs *Ruleset) PluralizeWithSize(word string, size int) string { + if size == 1 { + return rs.Singularize(word) + } + return rs.Pluralize(word) +} + +// Pluralize returns the plural form of a singular word +func (rs *Ruleset) Pluralize(word string) string { + if len(word) == 0 { + return word + } + lWord := strings.ToLower(word) + if rs.isUncountable(lWord) { + return word + } + + var candidate string + for _, rule := range rs.plurals { + if rule.exact { + if lWord == rule.suffix { + // Capitalized word + if lWord[0] != word[0] && lWord[1:] == word[1:] { + return rs.Capitalize(rule.replacement) + } + return rule.replacement + } + continue + } + + if strings.EqualFold(word, rule.suffix) { + candidate = rule.replacement + } + + if strings.HasSuffix(word, rule.suffix) { + return replaceLast(word, rule.suffix, rule.replacement) + } + } + + if candidate != "" { + return candidate + } + return word + "s" +} + +//Singularize returns the singular form of a plural word +func (rs *Ruleset) Singularize(word string) string { + if len(word) <= 1 { + return word + } + lWord := strings.ToLower(word) + if rs.isUncountable(lWord) { + return word + } + + var candidate string + + for _, rule := range rs.singulars { + if rule.exact { + if lWord == rule.suffix { + // Capitalized word + if lWord[0] != word[0] && lWord[1:] == word[1:] { + return rs.Capitalize(rule.replacement) + } + return rule.replacement + } + continue + } + + if strings.EqualFold(word, rule.suffix) { + candidate = rule.replacement + } + + if strings.HasSuffix(word, rule.suffix) { + return replaceLast(word, rule.suffix, rule.replacement) + } + } + + if candidate != "" { + return candidate + } + + return word +} + +//Capitalize uppercase first character +func (rs *Ruleset) Capitalize(word string) string { + if rs.isAcronym(word) { + return strings.ToUpper(word) + } + return strings.ToUpper(word[:1]) + word[1:] +} + +//Camelize "dino_party" -> "DinoParty" +func (rs *Ruleset) Camelize(word string) string { + if rs.isAcronym(word) { + return strings.ToUpper(word) + } + words := splitAtCaseChangeWithTitlecase(word) + return strings.Join(words, "") +} + +//CamelizeDownFirst same as Camelcase but with first letter downcased +func (rs *Ruleset) CamelizeDownFirst(word string) string { + word = Camelize(word) + return strings.ToLower(word[:1]) + word[1:] +} + +//Titleize Capitalize every word in sentence "hello there" -> "Hello There" +func (rs *Ruleset) Titleize(word string) string { + words := splitAtCaseChangeWithTitlecase(word) + result := strings.Join(words, " ") + + var acronymWords []string + for index, word := range words { + if len(word) == 1 { + acronymWords = append(acronymWords, word) + } + + if len(word) > 1 || index == len(words)-1 || len(acronymWords) > 1 { + acronym := strings.Join(acronymWords, "") + if !rs.isAcronym(acronym) { + acronymWords = acronymWords[:len(acronymWords)] + continue + } + + result = strings.Replace(result, strings.Join(acronymWords, " "), acronym, 1) + acronymWords = []string{} + } + } + + return result +} + +func (rs *Ruleset) safeCaseAcronyms(word string) string { + // convert an acronym like HTML into Html + for _, rule := range rs.acronyms { + word = strings.Replace(word, rule.suffix, rule.replacement, -1) + } + return word +} + +func (rs *Ruleset) 
separatedWords(word, sep string) string { + word = rs.safeCaseAcronyms(word) + words := splitAtCaseChange(word) + return strings.Join(words, sep) +} + +//Underscore lowercase underscore version "BigBen" -> "big_ben" +func (rs *Ruleset) Underscore(word string) string { + return rs.separatedWords(word, "_") +} + +//Humanize First letter of sentence capitalized +// Uses custom friendly replacements via AddHuman() +func (rs *Ruleset) Humanize(word string) string { + word = replaceLast(word, "_id", "") // strip foreign key kinds + // replace and strings in humans list + for _, rule := range rs.humans { + word = strings.Replace(word, rule.suffix, rule.replacement, -1) + } + sentence := rs.separatedWords(word, " ") + + r, n := utf8.DecodeRuneInString(sentence) + return string(unicode.ToUpper(r)) + sentence[n:] +} + +//ForeignKey an underscored foreign key name "Person" -> "person_id" +func (rs *Ruleset) ForeignKey(word string) string { + return rs.Underscore(rs.Singularize(word)) + "_id" +} + +//ForeignKeyCondensed a foreign key (with an underscore) "Person" -> "personid" +func (rs *Ruleset) ForeignKeyCondensed(word string) string { + return rs.Underscore(word) + "id" +} + +//Tableize Rails style pluralized table names: "SuperPerson" -> "super_people" +func (rs *Ruleset) Tableize(word string) string { + return rs.Pluralize(rs.Underscore(rs.Typeify(word))) +} + +var notUrlSafe *regexp.Regexp = regexp.MustCompile(`[^\w\d\-_ ]`) + +//Parameterize param safe dasherized names like "my-param" +func (rs *Ruleset) Parameterize(word string) string { + return ParameterizeJoin(word, "-") +} + +//ParameterizeJoin param safe dasherized names with custom separator +func (rs *Ruleset) ParameterizeJoin(word, sep string) string { + word = strings.ToLower(word) + word = rs.Asciify(word) + word = notUrlSafe.ReplaceAllString(word, "") + word = strings.Replace(word, " ", sep, -1) + if len(sep) > 0 { + squash, err := regexp.Compile(sep + "+") + if err == nil { + word = squash.ReplaceAllString(word, sep) + } + } + word = strings.Trim(word, sep+" ") + return word +} + +var lookalikes = map[string]*regexp.Regexp{ + "A": regexp.MustCompile(`À|Á|Â|Ã|Ä|Å`), + "AE": regexp.MustCompile(`Æ`), + "C": regexp.MustCompile(`Ç`), + "E": regexp.MustCompile(`È|É|Ê|Ë`), + "G": regexp.MustCompile(`Ğ`), + "I": regexp.MustCompile(`Ì|Í|Î|Ï|İ`), + "N": regexp.MustCompile(`Ñ`), + "O": regexp.MustCompile(`Ò|Ó|Ô|Õ|Ö|Ø`), + "S": regexp.MustCompile(`Ş`), + "U": regexp.MustCompile(`Ù|Ú|Û|Ü`), + "Y": regexp.MustCompile(`Ý`), + "ss": regexp.MustCompile(`ß`), + "a": regexp.MustCompile(`à|á|â|ã|ä|å`), + "ae": regexp.MustCompile(`æ`), + "c": regexp.MustCompile(`ç`), + "e": regexp.MustCompile(`è|é|ê|ë`), + "g": regexp.MustCompile(`ğ`), + "i": regexp.MustCompile(`ì|í|î|ï|ı`), + "n": regexp.MustCompile(`ñ`), + "o": regexp.MustCompile(`ò|ó|ô|õ|ö|ø`), + "s": regexp.MustCompile(`ş`), + "u": regexp.MustCompile(`ù|ú|û|ü|ũ|ū|ŭ|ů|ű|ų`), + "y": regexp.MustCompile(`ý|ÿ`), +} + +//Asciify transforms Latin characters like é -> e +func (rs *Ruleset) Asciify(word string) string { + for repl, regex := range lookalikes { + word = regex.ReplaceAllString(word, repl) + } + return word +} + +var tablePrefix = regexp.MustCompile(`^[^.]*\.`) + +//Typeify "something_like_this" -> "SomethingLikeThis" +func (rs *Ruleset) Typeify(word string) string { + word = tablePrefix.ReplaceAllString(word, "") + return rs.Camelize(rs.Singularize(word)) +} + +//Dasherize "SomeText" -> "some-text" +func (rs *Ruleset) Dasherize(word string) string { + return rs.separatedWords(word, "-") +} + 
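These Ruleset methods are also exposed as package-level wrappers (defined later in this file) that delegate to a default English ruleset. A rough usage sketch; the expected outputs assume that default ruleset with no inflections.json overrides and mirror the examples in the doc comments:

```go
package main

import (
	"fmt"

	"github.com/markbates/inflect"
)

func main() {
	// Plural/singular forms come from the default English ruleset.
	fmt.Println(inflect.Pluralize("index"))      // indices
	fmt.Println(inflect.Singularize("statuses")) // status

	// Case transformations of the kind used by code generators.
	fmt.Println(inflect.Camelize("dino_party")) // DinoParty
	fmt.Println(inflect.Underscore("BigBen"))   // big_ben
	fmt.Println(inflect.Dasherize("SomeText"))  // some-text

	// Ordinalize only changes strings that parse as integers.
	fmt.Println(inflect.Ordinalize("1031")) // 1031st
}
```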
+//Ordinalize "1031" -> "1031st" +func (rs *Ruleset) Ordinalize(str string) string { + number, err := strconv.Atoi(str) + if err != nil { + return str + } + switch abs(number) % 100 { + case 11, 12, 13: + return fmt.Sprintf("%dth", number) + default: + switch abs(number) % 10 { + case 1: + return fmt.Sprintf("%dst", number) + case 2: + return fmt.Sprintf("%dnd", number) + case 3: + return fmt.Sprintf("%drd", number) + } + } + return fmt.Sprintf("%dth", number) +} + +//ForeignKeyToAttribute returns the attribute name from the foreign key +func (rs *Ruleset) ForeignKeyToAttribute(str string) string { + w := rs.Camelize(str) + if strings.HasSuffix(w, "Id") { + return strings.TrimSuffix(w, "Id") + "ID" + } + return w +} + +//LoadReader loads rules from io.Reader param +func (rs *Ruleset) LoadReader(r io.Reader) error { + m := map[string]string{} + err := json.NewDecoder(r).Decode(&m) + if err != nil { + return fmt.Errorf("could not decode inflection JSON from reader: %s", err) + } + for s, p := range m { + defaultRuleset.AddIrregular(s, p) + } + return nil +} + +///////////////////////////////////////// +// the default global ruleset +////////////////////////////////////////// + +var defaultRuleset *Ruleset + +//LoadReader loads rules from io.Reader param +func LoadReader(r io.Reader) error { + return defaultRuleset.LoadReader(r) +} + +func init() { + defaultRuleset = NewDefaultRuleset() + + pwd, _ := os.Getwd() + cfg := filepath.Join(pwd, "inflections.json") + if p := os.Getenv("INFLECT_PATH"); p != "" { + cfg = p + } + if _, err := os.Stat(cfg); err == nil { + b, err := ioutil.ReadFile(cfg) + if err != nil { + fmt.Printf("could not read inflection file %s (%s)\n", cfg, err) + return + } + if err = defaultRuleset.LoadReader(bytes.NewReader(b)); err != nil { + fmt.Println(err) + } + } +} + +//Uncountables returns a list of uncountables rules +func Uncountables() map[string]bool { + return defaultRuleset.Uncountables() +} + +//AddPlural adds plural to the ruleset +func AddPlural(suffix, replacement string) { + defaultRuleset.AddPlural(suffix, replacement) +} + +//AddSingular adds singular to the ruleset +func AddSingular(suffix, replacement string) { + defaultRuleset.AddSingular(suffix, replacement) +} + +//AddHuman adds human +func AddHuman(suffix, replacement string) { + defaultRuleset.AddHuman(suffix, replacement) +} + +func AddIrregular(singular, plural string) { + defaultRuleset.AddIrregular(singular, plural) +} + +func AddAcronym(word string) { + defaultRuleset.AddAcronym(word) +} + +func AddUncountable(word string) { + defaultRuleset.AddUncountable(word) +} + +func Pluralize(word string) string { + return defaultRuleset.Pluralize(word) +} + +func PluralizeWithSize(word string, size int) string { + return defaultRuleset.PluralizeWithSize(word, size) +} + +func Singularize(word string) string { + return defaultRuleset.Singularize(word) +} + +func Capitalize(word string) string { + return defaultRuleset.Capitalize(word) +} + +func Camelize(word string) string { + return defaultRuleset.Camelize(word) +} + +func CamelizeDownFirst(word string) string { + return defaultRuleset.CamelizeDownFirst(word) +} + +func Titleize(word string) string { + return defaultRuleset.Titleize(word) +} + +func Underscore(word string) string { + return defaultRuleset.Underscore(word) +} + +func Humanize(word string) string { + return defaultRuleset.Humanize(word) +} + +func ForeignKey(word string) string { + return defaultRuleset.ForeignKey(word) +} + +func ForeignKeyCondensed(word string) string { + return 
defaultRuleset.ForeignKeyCondensed(word) +} + +func Tableize(word string) string { + return defaultRuleset.Tableize(word) +} + +func Parameterize(word string) string { + return defaultRuleset.Parameterize(word) +} + +func ParameterizeJoin(word, sep string) string { + return defaultRuleset.ParameterizeJoin(word, sep) +} + +func Typeify(word string) string { + return defaultRuleset.Typeify(word) +} + +func Dasherize(word string) string { + return defaultRuleset.Dasherize(word) +} + +func Ordinalize(word string) string { + return defaultRuleset.Ordinalize(word) +} + +func Asciify(word string) string { + return defaultRuleset.Asciify(word) +} + +func ForeignKeyToAttribute(word string) string { + return defaultRuleset.ForeignKeyToAttribute(word) +} + +// helper funcs + +func reverse(s string) string { + o := make([]rune, utf8.RuneCountInString(s)) + i := len(o) + for _, c := range s { + i-- + o[i] = c + } + return string(o) +} + +func isSpacerChar(c rune) bool { + switch { + case c == rune("_"[0]): + return true + case c == rune(" "[0]): + return true + case c == rune(":"[0]): + return true + case c == rune("-"[0]): + return true + } + return false +} + +func splitAtCaseChange(s string) []string { + words := make([]string, 0) + word := make([]rune, 0) + for _, c := range s { + spacer := isSpacerChar(c) + if len(word) > 0 { + if unicode.IsUpper(c) || spacer { + words = append(words, string(word)) + word = make([]rune, 0) + } + } + if !spacer { + word = append(word, unicode.ToLower(c)) + } + } + words = append(words, string(word)) + return words +} + +func splitAtCaseChangeWithTitlecase(s string) []string { + words := make([]string, 0) + word := make([]rune, 0) + + for _, c := range s { + spacer := isSpacerChar(c) + if len(word) > 0 { + if unicode.IsUpper(c) || spacer { + words = append(words, string(word)) + word = make([]rune, 0) + } + } + if !spacer { + if len(word) > 0 { + word = append(word, unicode.ToLower(c)) + } else { + word = append(word, unicode.ToUpper(c)) + } + } + } + + words = append(words, string(word)) + return words +} + +func replaceLast(s, match, repl string) string { + // reverse strings + srev := reverse(s) + mrev := reverse(match) + rrev := reverse(repl) + // match first and reverse back + return reverse(strings.Replace(srev, mrev, rrev, 1)) +} + +func abs(x int) int { + if x < 0 { + return -x + } + return x +} diff --git a/vendor/github.com/markbates/inflect/name.go b/vendor/github.com/markbates/inflect/name.go new file mode 100644 index 000000000..e6863e28a --- /dev/null +++ b/vendor/github.com/markbates/inflect/name.go @@ -0,0 +1,163 @@ +package inflect + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/gobuffalo/envy" +) + +// Name is a string that represents the "name" of a thing, like an app, model, etc... +type Name string + +// Title version of a name. ie. "foo_bar" => "Foo Bar" +func (n Name) Title() string { + x := strings.Split(string(n), "/") + for i, s := range x { + x[i] = Titleize(s) + } + + return strings.Join(x, " ") +} + +// Underscore version of a name. ie. 
"FooBar" => "foo_bar" +func (n Name) Underscore() string { + w := string(n) + if strings.ToUpper(w) == w { + return strings.ToLower(w) + } + return Underscore(w) +} + +// Plural version of a name +func (n Name) Plural() string { + return Pluralize(string(n)) +} + +// Singular version of a name +func (n Name) Singular() string { + return Singularize(string(n)) +} + +// Camel version of a name +func (n Name) Camel() string { + c := Camelize(string(n)) + if strings.HasSuffix(c, "Id") { + c = strings.TrimSuffix(c, "Id") + c += "ID" + } + return c +} + +// Model version of a name. ie. "user" => "User" +func (n Name) Model() string { + x := strings.Split(string(n), "/") + for i, s := range x { + x[i] = Camelize(Singularize(s)) + } + + return strings.Join(x, "") +} + +// Resource version of a name +func (n Name) Resource() string { + name := n.Underscore() + x := strings.FieldsFunc(name, func(r rune) bool { + return r == '_' || r == '/' + }) + + for i, w := range x { + if i == len(x)-1 { + x[i] = Camelize(Pluralize(strings.ToLower(w))) + continue + } + + x[i] = Camelize(w) + } + + return strings.Join(x, "") +} + +// ModelPlural version of a name. ie. "user" => "Users" +func (n Name) ModelPlural() string { + return Camelize(Pluralize(n.Model())) +} + +// File version of a name +func (n Name) File() string { + return Underscore(Camelize(string(n))) +} + +// Table version of a name +func (n Name) Table() string { + return Underscore(Pluralize(string(n))) +} + +// UnderSingular version of a name +func (n Name) UnderSingular() string { + return Underscore(Singularize(string(n))) +} + +// PluralCamel version of a name +func (n Name) PluralCamel() string { + return Pluralize(Camelize(string(n))) +} + +// PluralUnder version of a name +func (n Name) PluralUnder() string { + return Pluralize(Underscore(string(n))) +} + +// URL version of a name +func (n Name) URL() string { + return n.PluralUnder() +} + +// CamelSingular version of a name +func (n Name) CamelSingular() string { + return Camelize(Singularize(string(n))) +} + +// VarCaseSingular version of a name. ie. "FooBar" => "fooBar" +func (n Name) VarCaseSingular() string { + return CamelizeDownFirst(Singularize(Underscore(n.Resource()))) +} + +// VarCasePlural version of a name. ie. "FooBar" => "fooBar" +func (n Name) VarCasePlural() string { + return CamelizeDownFirst(n.Resource()) +} + +// Lower case version of a string +func (n Name) Lower() string { + return strings.ToLower(string(n)) +} + +// ParamID returns foo_bar_id +func (n Name) ParamID() string { + return fmt.Sprintf("%s_id", strings.Replace(n.UnderSingular(), "/", "_", -1)) +} + +// Package returns go package +func (n Name) Package() string { + key := string(n) + + for _, gp := range envy.GoPaths() { + key = strings.TrimPrefix(key, filepath.Join(gp, "src")) + key = strings.TrimPrefix(key, gp) + } + key = strings.TrimPrefix(key, string(filepath.Separator)) + + key = strings.Replace(key, "\\", "/", -1) + return key +} + +// Char returns first character in lower case, this is useful for methods inside a struct. 
+func (n Name) Char() string { + return strings.ToLower(string(n[0])) +} + +func (n Name) String() string { + return string(n) +} diff --git a/vendor/github.com/markbates/inflect/version.go b/vendor/github.com/markbates/inflect/version.go new file mode 100644 index 000000000..a16744984 --- /dev/null +++ b/vendor/github.com/markbates/inflect/version.go @@ -0,0 +1,3 @@ +package inflect + +const Version = "v1.0.4" diff --git a/vendor/github.com/mattbaird/jsonpatch/.gitignore b/vendor/github.com/mattbaird/jsonpatch/.gitignore deleted file mode 100644 index daf913b1b..000000000 --- a/vendor/github.com/mattbaird/jsonpatch/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/mattbaird/jsonpatch/README.md b/vendor/github.com/mattbaird/jsonpatch/README.md deleted file mode 100644 index 91c03b3fc..000000000 --- a/vendor/github.com/mattbaird/jsonpatch/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# jsonpatch -As per http://jsonpatch.com/ JSON Patch is specified in RFC 6902 from the IETF. - -JSON Patch allows you to generate JSON that describes changes you want to make to a document, so you don't have to send the whole doc. JSON Patch format is supported by HTTP PATCH method, allowing for standards based partial updates via REST APIs. - -```bash -go get github.com/mattbaird/jsonpatch -``` - -I tried some of the other "jsonpatch" go implementations, but none of them could diff two json documents and -generate format like jsonpatch.com specifies. Here's an example of the patch format: - -```json -[ - { "op": "replace", "path": "/baz", "value": "boo" }, - { "op": "add", "path": "/hello", "value": ["world"] }, - { "op": "remove", "path": "/foo"} -] - -``` -The API is super simple -#example -```go -package main - -import ( - "fmt" - "github.com/mattbaird/jsonpatch" -) - -var simpleA = `{"a":100, "b":200, "c":"hello"}` -var simpleB = `{"a":100, "b":200, "c":"goodbye"}` - -func main() { - patch, e := jsonpatch.CreatePatch([]byte(simpleA), []byte(simpleA)) - if e != nil { - fmt.Printf("Error creating JSON patch:%v", e) - return - } - for _, operation := range patch { - fmt.Printf("%s\n", operation.Json()) - } -} -``` - -This code needs more tests, as it's a highly recursive, type-fiddly monster. It's not a lot of code, but it has to deal with a lot of complexity. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore deleted file mode 100644 index e16fb946b..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile deleted file mode 100644 index 81be21437..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... 
- go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/mholt/caddy/.gitattributes b/vendor/github.com/mholt/caddy/.gitattributes deleted file mode 100644 index 2e5077856..000000000 --- a/vendor/github.com/mholt/caddy/.gitattributes +++ /dev/null @@ -1,19 +0,0 @@ -# shell scripts should not use tabs to indent! -*.bash text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.sh text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 - -# files for systemd (shell-similar) -*.path text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.service text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.timer text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 - -# go fmt will enforce this, but in case a user has not called "go fmt" allow GIT to catch this: -*.go text eol=lf core.whitespace whitespace=indent-with-non-tab,trailing-space,tabwidth=4 - -*.txt text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.tpl text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.htm text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.html text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.md text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.yml text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -.git* text eol=auto core.whitespace whitespace=trailing-space diff --git a/vendor/github.com/mholt/caddy/.gitignore b/vendor/github.com/mholt/caddy/.gitignore deleted file mode 100644 index d996add1e..000000000 --- a/vendor/github.com/mholt/caddy/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -.DS_Store -Thumbs.db -_gitignore/ -Vagrantfile -.vagrant/ -/.idea - -dist/builds/ -dist/release/ - -error.log -access.log - -/*.conf -Caddyfile -!caddyfile/ - -og_static/ - -.vscode/ - -*.bat diff --git a/vendor/github.com/mholt/caddy/.travis.yml b/vendor/github.com/mholt/caddy/.travis.yml deleted file mode 100644 index 5040e0036..000000000 --- a/vendor/github.com/mholt/caddy/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -language: go - -addons: - hosts: - - quic.clemente.io - -go: - - 1.x - - tip - -matrix: - allow_failures: - - go: tip - fast_finish: true - -before_install: - # Decrypts a script that installs an authenticated cookie - # for git to use when cloning from googlesource.com. - # Bypasses "bandwidth limit exceeded" errors. - # See github.com/golang/go/issues/12933 - - if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then openssl aes-256-cbc -K $encrypted_3df18f9af81d_key -iv $encrypted_3df18f9af81d_iv -in dist/gitcookie.sh.enc -out dist/gitcookie.sh -d; fi - -install: - - if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then bash dist/gitcookie.sh; fi - - go get -t ./... - - go get golang.org/x/lint/golint - - go get github.com/FiloSottile/vendorcheck - - go get github.com/alecthomas/gometalinter - -script: - - gometalinter --install - - gometalinter --disable-all -E vet -E gofmt -E misspell -E ineffassign -E goimports -E deadcode --tests --vendor ./... - - vendorcheck ./... - - go test -race ./... - -after_script: - - golint ./... diff --git a/vendor/github.com/mholt/caddy/README.md b/vendor/github.com/mholt/caddy/README.md deleted file mode 100644 index 3fe847a29..000000000 --- a/vendor/github.com/mholt/caddy/README.md +++ /dev/null @@ -1,184 +0,0 @@ -

-# Caddy
-### Every Site on HTTPS
-Caddy is a general-purpose HTTP/2 web server that serves HTTPS by default.
-(logo and badges)
-@caddyserver on Twitter · Caddy Forum · Caddy on Sourcegraph
-Download · Documentation · Community
- ---- - -Caddy is a **production-ready** open-source web server that is fast, easy to use, and makes you more productive. - -Available for Windows, Mac, Linux, BSD, Solaris, and [Android](https://github.com/mholt/caddy/wiki/Running-Caddy-on-Android). - -

-Thanks to our special sponsor:
-Relica - Cross-platform file backup to the cloud, local disks, or other computers
- -## Menu - -- [Features](#features) -- [Install](#install) -- [Quick Start](#quick-start) -- [Running in Production](#running-in-production) -- [Contributing](#contributing) -- [Donors](#donors) -- [About the Project](#about-the-project) - -## Features - -- **Easy configuration** with the Caddyfile -- **Automatic HTTPS** on by default (via [Let's Encrypt](https://letsencrypt.org)) -- **HTTP/2** by default -- **Virtual hosting** so multiple sites just work -- Experimental **QUIC support** for cutting-edge transmissions -- TLS session ticket **key rotation** for more secure connections -- **Extensible with plugins** because a convenient web server is a helpful one -- **Runs anywhere** with **no external dependencies** (not even libc) - -[See a more complete list of features built into Caddy.](https://caddyserver.com/features) On top of all those, Caddy does even more with plugins: choose which plugins you want at [download](https://caddyserver.com/download). - -Altogether, Caddy can do things other web servers simply cannot do. Its features and plugins save you time and mistakes, and will cheer you up. Your Caddy instance takes care of the details for you! - - -

-Powered by CertMagic
- - -## Install - -Caddy binaries have no dependencies and are available for every platform. Get Caddy any of these ways: - -- **[Download page](https://caddyserver.com/download)** (RECOMMENDED) allows you to customize your build in the browser -- **[Latest release](https://github.com/mholt/caddy/releases/latest)** for pre-built, vanilla binaries -- **[AWS Marketplace](https://aws.amazon.com/marketplace/pp/B07J1WNK75?qid=1539015041932&sr=0-1&ref_=srh_res_product_title&cl_spe=C)** makes it easy to deploy directly to your cloud environment. -Get Caddy on the AWS Marketplace - - -## Build - -To build from source you need **[Git](https://git-scm.com/downloads)** and **[Go](https://golang.org/doc/install)** (1.12 or newer). Follow these instruction for fast building: - -- Get the source with `go get github.com/mholt/caddy/caddy` and then run `go get github.com/caddyserver/builds` -- Now `cd $GOPATH/src/github.com/mholt/caddy/caddy` and run `go run build.go` - -Then make sure the `caddy` binary is in your PATH. - -To build for other platforms, use build.go with the `--goos` and `--goarch` flags. - -When building from source, telemetry is enabled by default. You can disable it by changing `enableTelemetry` in run.go before compiling, or use the `-disabled-metrics` flag at runtime to disable only certain metrics. - - -## Quick Start - -To serve static files from the current working directory, run: - -``` -caddy -``` - -Caddy's default port is 2015, so open your browser to [http://localhost:2015](http://localhost:2015). - -### Go from 0 to HTTPS in 5 seconds - -If the `caddy` binary has permission to bind to low ports and your domain name's DNS records point to the machine you're on: - -``` -caddy -host example.com -``` - -This command serves static files from the current directory over HTTPS. Certificates are automatically obtained and renewed for you! Caddy is also automatically configuring ports 80 and 443 for you, and redirecting HTTP to HTTPS. Cool, huh? - -### Customizing your site - -To customize how your site is served, create a file named Caddyfile by your site and paste this into it: - -```plain -localhost - -push -browse -websocket /echo cat -ext .html -log /var/log/access.log -proxy /api 127.0.0.1:7005 -header /api Access-Control-Allow-Origin * -``` - -When you run `caddy` in that directory, it will automatically find and use that Caddyfile. - -This simple file enables server push (via Link headers), allows directory browsing (for folders without an index file), hosts a WebSocket echo server at /echo, serves clean URLs, logs requests to an access log, proxies all API requests to a backend on port 7005, and adds the coveted `Access-Control-Allow-Origin: *` header for all responses from the API. - -Wow! Caddy can do a lot with just a few lines. - -### Doing more with Caddy - -To host multiple sites and do more with the Caddyfile, please see the [Caddyfile tutorial](https://caddyserver.com/tutorial/caddyfile). - -Sites with qualifying hostnames are served over [HTTPS by default](https://caddyserver.com/docs/automatic-https). - -Caddy has a nice little command line interface. Run `caddy -h` to view basic help or see the [CLI documentation](https://caddyserver.com/docs/cli) for details. - - -## Running in Production - -Caddy is production-ready if you find it to be a good fit for your site and workflow. - -**Running as root:** We advise against this. 
You can still listen on ports < 1024 on Linux using setcap like so: `sudo setcap cap_net_bind_service=+ep ./caddy` - -The Caddy project does not officially maintain any system-specific integrations nor suggest how to administer your own system. But your download file includes [unofficial resources](https://github.com/mholt/caddy/tree/master/dist/init) contributed by the community that you may find helpful for running Caddy in production. - -How you choose to run Caddy is up to you. Many users are satisfied with `nohup caddy &`. Others use `screen`. Users who need Caddy to come back up after reboots either do so in the script that caused the reboot, add a command to an init script, or configure a service with their OS. - -If you have questions or concerns about Caddy' underlying crypto implementations, consult Go's [crypto packages](https://golang.org/pkg/crypto), starting with their documentation, then issues, then the code itself; as Caddy uses mainly those libraries. - - -## Contributing - -**[Join our forum](https://caddy.community) where you can chat with other Caddy users and developers!** To get familiar with the code base, try [Caddy code search on Sourcegraph](https://sourcegraph.com/github.com/mholt/caddy/)! - -Please see our [contributing guidelines](https://github.com/mholt/caddy/blob/master/.github/CONTRIBUTING.md) for instructions. If you want to write a plugin, check out the [developer wiki](https://github.com/mholt/caddy/wiki). - -We use GitHub issues and pull requests only for discussing bug reports and the development of specific changes. We welcome all other topics on the [forum](https://caddy.community)! - -If you want to contribute to the documentation, please [submit an issue](https://github.com/mholt/caddy/issues/new) describing the change that should be made. - -Thanks for making Caddy -- and the Web -- better! - - -## Donors - -- [DigitalOcean](https://m.do.co/c/6d7bdafccf96) is hosting the Caddy project. -- [DNSimple](https://dnsimple.link/resolving-caddy) provides DNS services for Caddy's sites. -- [DNS Spy](https://dnsspy.io) keeps an eye on Caddy's DNS properties. - -We thank them for their services. **If you want to help keep Caddy free, please [become a sponsor](https://caddyserver.com/pricing)!** - - -## About the Project - -Caddy was born out of the need for a "batteries-included" web server that runs anywhere and doesn't have to take its configuration with it. Caddy took inspiration from [spark](https://github.com/rif/spark), [nginx](https://github.com/nginx/nginx), lighttpd, -[Websocketd](https://github.com/joewalnes/websocketd) and [Vagrant](https://www.vagrantup.com/), which provides a pleasant mixture of features from each of them. - -**The name "Caddy" is trademarked:** The name of the software is "Caddy", not "Caddy Server" or "CaddyServer". Please call it "Caddy" or, if you wish to clarify, "the Caddy web server". See [brand guidelines](https://caddyserver.com/brand). Caddy is a registered trademark of Light Code Labs, LLC. 
- -*Author on Twitter: [@mholt6](https://twitter.com/mholt6)* diff --git a/vendor/github.com/mholt/caddy/appveyor.yml b/vendor/github.com/mholt/caddy/appveyor.yml deleted file mode 100644 index 7d04da931..000000000 --- a/vendor/github.com/mholt/caddy/appveyor.yml +++ /dev/null @@ -1,38 +0,0 @@ -version: "{build}" - -hosts: - quic.clemente.io: 127.0.0.1 - -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\mholt\caddy - -environment: - GOPATH: c:\gopath - -stack: go 1.12 - -install: - - set PATH=%GOPATH%\bin;%PATH% - - set PATH=C:\msys64\mingw64\bin;%PATH% - - go version - - go env - - go get -t ./... - - go get golang.org/x/lint/golint - - go get github.com/FiloSottile/vendorcheck - - go get github.com/alecthomas/gometalinter - -build: off - -test_script: - - gometalinter --install - - gometalinter --disable-all -E vet -E gofmt -E misspell -E ineffassign -E goimports -E deadcode --tests --vendor ./... - - vendorcheck ./... - - go test -race ./... - -after_test: - - golint ./... - -deploy: off - -skip_branch_with_pr: true diff --git a/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fcgi_test.php b/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fcgi_test.php deleted file mode 100644 index 3f5e5f2db..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fcgi_test.php +++ /dev/null @@ -1,79 +0,0 @@ - $val) { - $md5 = md5($val); - - if ($key != $md5) { - $stat = "FAILED"; - echo "server:err ".$md5." != ".$key."\n"; - } - - $length += strlen($key) + strlen($val); - - $ret .= $key."(".strlen($key).") "; - } - $ret .= "] ["; - foreach ($_FILES as $k0 => $val) { - - $error = $val["error"]; - if ($error == UPLOAD_ERR_OK) { - $tmp_name = $val["tmp_name"]; - $name = $val["name"]; - $datafile = "/tmp/test.go"; - move_uploaded_file($tmp_name, $datafile); - $md5 = md5_file($datafile); - - if ($k0 != $md5) { - $stat = "FAILED"; - echo "server:err ".$md5." 
!= ".$key."\n"; - } - - $length += strlen($k0) + filesize($datafile); - - unlink($datafile); - $ret .= $k0."(".strlen($k0).") "; - } - else{ - $stat = "FAILED"; - echo "server:file err ".file_upload_error_message($error)."\n"; - } - } - $ret .= "]"; - echo "server:got data length " .$length."\n"; -} - - -echo "-{$stat}-POST(".count($_POST).") FILE(".count($_FILES).")\n"; - -function file_upload_error_message($error_code) { - switch ($error_code) { - case UPLOAD_ERR_INI_SIZE: - return 'The uploaded file exceeds the upload_max_filesize directive in php.ini'; - case UPLOAD_ERR_FORM_SIZE: - return 'The uploaded file exceeds the MAX_FILE_SIZE directive that was specified in the HTML form'; - case UPLOAD_ERR_PARTIAL: - return 'The uploaded file was only partially uploaded'; - case UPLOAD_ERR_NO_FILE: - return 'No file was uploaded'; - case UPLOAD_ERR_NO_TMP_DIR: - return 'Missing a temporary folder'; - case UPLOAD_ERR_CANT_WRITE: - return 'Failed to write file to disk'; - case UPLOAD_ERR_EXTENSION: - return 'File upload stopped by extension'; - default: - return 'Unknown upload error'; - } -} \ No newline at end of file diff --git a/vendor/github.com/mholt/certmagic/.gitignore b/vendor/github.com/mholt/certmagic/.gitignore deleted file mode 100644 index fbd281d14..000000000 --- a/vendor/github.com/mholt/certmagic/.gitignore +++ /dev/null @@ -1 +0,0 @@ -_gitignore/ diff --git a/vendor/github.com/mholt/certmagic/.travis.yml b/vendor/github.com/mholt/certmagic/.travis.yml deleted file mode 100644 index bb97cafff..000000000 --- a/vendor/github.com/mholt/certmagic/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go - -go: - - 1.x - - tip - -matrix: - allow_failures: - - go: tip - fast_finish: true - -install: - - go get -t ./... - - go get golang.org/x/lint/golint - - go get github.com/alecthomas/gometalinter - -script: - - gometalinter --install - - gometalinter --disable-all -E vet -E gofmt -E misspell -E ineffassign -E goimports -E deadcode --tests ./... - - go test -race ./... - -after_script: - - golint ./... diff --git a/vendor/github.com/mholt/certmagic/README.md b/vendor/github.com/mholt/certmagic/README.md deleted file mode 100644 index 6fb956d12..000000000 --- a/vendor/github.com/mholt/certmagic/README.md +++ /dev/null @@ -1,491 +0,0 @@ -

-# CertMagic
-### Easy and Powerful TLS Automation
-The same library used by the Caddy Web Server
-(logo and badges)
- - -Caddy's automagic TLS features, now for your own Go programs, in one powerful and easy-to-use library! - -CertMagic is the most mature, robust, and capable ACME client integration for Go. - -With CertMagic, you can add one line to your Go application to serve securely over TLS, without ever having to touch certificates. - -Instead of: - -```go -// plaintext HTTP, gross 🤢 -http.ListenAndServe(":80", mux) -``` - -Use CertMagic: - -```go -// encrypted HTTPS with HTTP->HTTPS redirects - yay! 🔒😍 -certmagic.HTTPS([]string{"example.com"}, mux) -``` - -That line of code will serve your HTTP router `mux` over HTTPS, complete with HTTP->HTTPS redirects. It obtains and renews the TLS certificates. It staples OCSP responses for greater privacy and security. As long as your domain name points to your server, CertMagic will keep its connections secure. - -Compared to other ACME client libraries for Go, only CertMagic supports the full suite of ACME features, and no other library matches CertMagic's maturity and reliability. - - - - -CertMagic - Automatic HTTPS using Let's Encrypt -=============================================== - -**Sponsored by Relica - Cross-platform local and cloud file backup:** - -Relica - Cross-platform file backup to the cloud, local disks, or other computers - - -## Menu - -- [Features](#features) -- [Requirements](#requirements) -- [Installation](#installation) -- [Usage](#usage) - - [Package Overview](#package-overview) - - [Certificate authority](#certificate-authority) - - [The `Config` type](#the-config-type) - - [Defaults](#defaults) - - [Providing an email address](#providing-an-email-address) - - [Development and testing](#development-and-testing) - - [Examples](#examples) - - [Serving HTTP handlers with HTTPS](#serving-http-handlers-with-https) - - [Starting a TLS listener](#starting-a-tls-listener) - - [Getting a tls.Config](#getting-a-tlsconfig) - - [Advanced use](#advanced-use) - - [Wildcard Certificates](#wildcard-certificates) - - [Behind a load balancer (or in a cluster)](#behind-a-load-balancer-or-in-a-cluster) - - [The ACME Challenges](#the-acme-challenges) - - [HTTP Challenge](#http-challenge) - - [TLS-ALPN Challenge](#tls-alpn-challenge) - - [DNS Challenge](#dns-challenge) - - [On-Demand TLS](#on-demand-tls) - - [Storage](#storage) - - [Cache](#cache) -- [Contributing](#contributing) -- [Project History](#project-history) -- [Credits and License](#credits-and-license) - - -## Features - -- Fully automated certificate management including issuance and renewal -- One-liner, fully managed HTTPS servers -- Full control over almost every aspect of the system -- HTTP->HTTPS redirects (for HTTP applications) -- Solves all 3 ACME challenges: HTTP, TLS-ALPN, and DNS -- Over 50 DNS providers work out-of-the-box (powered by [lego](https://github.com/xenolf/lego)!) -- Pluggable storage implementations (default: file system) -- Wildcard certificates (requires DNS challenge) -- OCSP stapling for each qualifying certificate ([done right](https://gist.github.com/sleevi/5efe9ef98961ecfb4da8#gistcomment-2336055)) -- Distributed solving of all challenges (works behind load balancers) -- Supports "on-demand" issuance of certificates (during TLS handshakes!) 
- - Custom decision functions - - Hostname whitelist - - Ask an external URL - - Rate limiting -- Optional event hooks to observe internal behaviors -- Works with any certificate authority (CA) compliant with the ACME specification -- Certificate revocation (please, only if private key is compromised) -- Must-Staple (optional; not default) -- Cross-platform support! Mac, Windows, Linux, BSD, Android... -- Scales well to thousands of names/certificates per instance -- Use in conjunction with your own certificates - - -## Requirements - -1. Public DNS name(s) you control -2. Server reachable from public Internet - - Or use the DNS challenge to waive this requirement -3. Control over port 80 (HTTP) and/or 443 (HTTPS) - - Or they can be forwarded to other ports you control - - Or use the DNS challenge to waive this requirement - - (This is a requirement of the ACME protocol, not a library limitation) -4. Persistent storage - - Typically the local file system (default) - - Other integrations available/possible - -**_Before using this library, your domain names MUST be pointed (A/AAAA records) at your server (unless you use the DNS challenge)!_** - - -## Installation - -```bash -$ go get -u github.com/mholt/certmagic -``` - - -## Usage - -### Package Overview - -#### Certificate authority - -This library uses Let's Encrypt by default, but you can use any certificate authority that conforms to the ACME specification. Known/common CAs are provided as consts in the package, for example `LetsEncryptStagingCA` and `LetsEncryptProductionCA`. - -#### The `Config` type - -The `certmagic.Config` struct is how you can wield the power of this fully armed and operational battle station. However, an empty config is _not_ a valid one! In time, you will learn to use the force of `certmagic.New(certmagic.Config{...})` as I have. - -#### Defaults - -For every field in the `Config` struct, there is a corresponding package-level variable you can set as a default value. These defaults will be used when you call any of the high-level convenience functions like `HTTPS()` or `Listen()` or anywhere else a default `Config` is used. They are also used for any `Config` fields that are zero-valued when you call `New()`. - -You can set these values easily, for example: `certmagic.Email = ...` sets the email address to use for everything unless you explicitly override it in a Config. - - -#### Providing an email address - -Although not strictly required, this is highly recommended best practice. It allows you to receive expiration emails if your certificates are expiring for some reason, and also allows the CA's engineers to potentially get in touch with you if something is wrong. I recommend setting `certmagic.Email` or always setting the `Email` field of the `Config` struct. - - -### Development and Testing - -Note that Let's Encrypt imposes [strict rate limits](https://letsencrypt.org/docs/rate-limits/) at its production endpoint, so using it while developing your application may lock you out for a few days if you aren't careful! - -While developing your application and testing it, use [their staging endpoint](https://letsencrypt.org/docs/staging-environment/) which has much higher rate limits. Even then, don't hammer it: but it's much safer for when you're testing. When deploying, though, use their production CA because their staging CA doesn't issue trusted certificates. - -To use staging, set `certmagic.CA = certmagic.LetsEncryptStagingCA` or set `CA` of every `Config` struct. 
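As a concrete illustration of the staging-versus-production advice above, here is a small sketch that uses only the package-level defaults this README already documents (`Agreed`, `Email`, `CA`, and `HTTPS`); the `ACME_PRODUCTION` switch and the email address are hypothetical:

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/mholt/certmagic"
)

func main() {
	certmagic.Agreed = true             // you have read the CA's legal documents
	certmagic.Email = "you@example.com" // hypothetical contact address

	// Default to the staging CA so development runs never hit
	// Let's Encrypt production rate limits; set the (hypothetical)
	// ACME_PRODUCTION variable only for real deployments.
	if os.Getenv("ACME_PRODUCTION") == "" {
		certmagic.CA = certmagic.LetsEncryptStagingCA
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello over HTTPS"))
	})

	// Serves mux over HTTPS with HTTP->HTTPS redirects, as described above.
	log.Fatal(certmagic.HTTPS([]string{"example.com"}, mux))
}
```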
- - - -### Examples - -There are many ways to use this library. We'll start with the highest-level (simplest) and work down (more control). - -First, we'll follow best practices and do the following: - -```go -// read and agree to your CA's legal documents -certmagic.Agreed = true - -// provide an email address -certmagic.Email = "you@yours.com" - -// use the staging endpoint while we're developing -certmagic.CA = certmagic.LetsEncryptStagingCA -``` - -For fully-functional program examples, check out [this Twitter thread](https://twitter.com/mholt6/status/1073103805112147968) (or read it [unrolled into a single post](https://threadreaderapp.com/thread/1073103805112147968.html)). - - -#### Serving HTTP handlers with HTTPS - -```go -err := certmagic.HTTPS([]string{"example.com", "www.example.com"}, mux) -if err != nil { - return err -} -``` - -This starts HTTP and HTTPS listeners and redirects HTTP to HTTPS! - -#### Starting a TLS listener - -```go -ln, err := certmagic.Listen([]string{"example.com"}) -if err != nil { - return err -} -``` - - -#### Getting a tls.Config - -```go -tlsConfig, err := certmagic.TLS([]string{"example.com"}) -if err != nil { - return err -} -``` - - -#### Advanced use - -For more control, you'll make and use a `Config` like so: - -```go -magic := certmagic.New(certmagic.Config{ - CA: certmagic.LetsEncryptStagingCA, - Email: "you@yours.com", - Agreed: true, - // plus any other customization you want -}) - -// this obtains certificates or renews them if necessary -err := magic.Manage([]string{"example.com", "sub.example.com"}) -if err != nil { - return err -} - -// to use its certificates and solve the TLS-ALPN challenge, -// you can get a TLS config to use in a TLS listener! -tlsConfig := magic.TLSConfig() - -// if you already have a TLS config you don't want to replace, -// we can simply set its GetCertificate field and append the -// TLS-ALPN challenge protocol to the NextProtos -myTLSConfig.GetCertificate = magic.GetCertificate -myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol} - -// the HTTP challenge has to be handled by your HTTP server; -// if you don't have one, you should have disabled it earlier -// when you made the certmagic.Config -httpMux = magic.HTTPChallengeHandler(httpMux) -``` - -Great! This example grants you much more flexibility for advanced programs. However, _the vast majority of you will only use the high-level functions described earlier_, especially since you can still customize them by setting the package-level defaults. - -If you want to use the default configuration but you still need a `certmagic.Config`, you can call `certmagic.Manage()` directly to get one: - -```go -magic, err := certmagic.Manage([]string{"example.com"}) -if err != nil { - return err -} -``` - -And then it's the same as above, as if you had made the `Config` yourself. - - -### Wildcard certificates - -At time of writing (December 2018), Let's Encrypt only issues wildcard certificates with the DNS challenge. - - -### Behind a load balancer (or in a cluster) - -CertMagic runs effectively behind load balancers and/or in cluster/fleet environments. In other words, you can have 10 or 1,000 servers all serving the same domain names, all sharing certificates and OCSP staples. - -To do so, simply ensure that each instance is using the same Storage. That is the sole criteria for determining whether an instance is part of a cluster. 
- -The default Storage is implemented using the file system, so mounting the same shared folder is sufficient (see [Storage](#storage) for more on that)! If you need an alternate Storage implementation, feel free to use one, provided that all the instances use the _same_ one. :) - -See [Storage](#storage) and the associated [godoc](https://godoc.org/github.com/mholt/certmagic#Storage) for more information! - - -## The ACME Challenges - -This section describes how to solve the ACME challenges. Challenges are how you demonstrate to the certificate authority some control over your domain name, thus authorizing them to grant you a certificate for that name. [The great innovation of ACME](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) is that verification by CAs can now be automated, rather than having to click links in emails (who ever thought that was a good idea??). - -If you're using the high-level convenience functions like `HTTPS()`, `Listen()`, or `TLS()`, the HTTP and/or TLS-ALPN challenges are solved for you because they also start listeners. However, if you're making a `Config` and you start your own server manually, you'll need to be sure the ACME challenges can be solved so certificates can be renewed. - -The HTTP and TLS-ALPN challenges are the defaults because they don't require configuration from you, but they require that your server is accessible from external IPs on low ports. If that is not possible in your situation, you can enable the DNS challenge, which will disable the HTTP and TLS-ALPN challenges and use the DNS challenge exclusively. - -Technically, only one challenge needs to be enabled for things to work, but using multiple is good for reliability in case a challenge is discontinued by the CA. This happened to the TLS-SNI challenge in early 2018—many popular ACME clients such as Traefik and Autocert broke, resulting in downtime for some sites, until new releases were made and patches deployed, because they used only one challenge; Caddy, however—this library's forerunner—was unaffected because it also used the HTTP challenge. If multiple challenges are enabled, they are chosen randomly to help prevent false reliance on a single challenge type. - - -### HTTP Challenge - -Per the ACME spec, the HTTP challenge requires port 80, or at least packet forwarding from port 80. It works by serving a specific HTTP response that only the genuine server would have to a normal HTTP request at a special endpoint. - -If you are running an HTTP server, solving this challenge is very easy: just wrap your handler in `HTTPChallengeHandler` _or_ call `SolveHTTPChallenge()` inside your own `ServeHTTP()` method. - -For example, if you're using the standard library: - -```go -mux := http.NewServeMux() -mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Lookit my cool website over HTTPS!") -}) - -http.ListenAndServe(":80", magic.HTTPChallengeHandler(mux)) -``` - -If wrapping your handler is not a good solution, try this inside your `ServeHTTP()` instead: - -```go -magic := certmagic.NewDefault() - -func ServeHTTP(w http.ResponseWriter, req *http.Request) { - if magic.HandleHTTPChallenge(w, r) { - return // challenge handled; nothing else to do - } - ... -} -``` - -If you are not running an HTTP server, you should disable the HTTP challenge _or_ run an HTTP server whose sole job it is to solve the HTTP challenge. 
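If you take the second route (a dedicated challenge solver), a minimal sketch could look like this; it assumes only the `NewDefault()` and `HTTPChallengeHandler()` calls shown earlier in this README, and answers everything else with 404:

```go
package main

import (
	"log"
	"net/http"

	"github.com/mholt/certmagic"
)

func main() {
	magic := certmagic.NewDefault()

	// Non-challenge requests fall through to this handler and get a 404;
	// ACME HTTP challenge requests are intercepted by the wrapper.
	notFound := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.NotFound(w, r)
	})

	// This server's sole job is to answer the HTTP challenge on port 80.
	log.Fatal(http.ListenAndServe(":80", magic.HTTPChallengeHandler(notFound)))
}
```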
- - -### TLS-ALPN Challenge - -Per the ACME spec, the TLS-ALPN challenge requires port 443, or at least packet forwarding from port 443. It works by providing a special certificate using a standard TLS extension, Application Layer Protocol Negotiation (ALPN), having a special value. This is the most convenient challenge type because it usually requires no extra configuration and uses the standard TLS port which is where the certificates are used, also. - -This challenge is easy to solve: just use the provided `tls.Config` when you make your TLS listener: - -```go -// use this to configure a TLS listener -tlsConfig := magic.TLSConfig() -``` - -Or make two simple changes to an existing `tls.Config`: - -```go -myTLSConfig.GetCertificate = magic.GetCertificate -myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol} -``` - -Then just make sure your TLS listener is listening on port 443: - -```go -ln, err := tls.Listen("tcp", ":443", myTLSConfig) -``` - - -### DNS Challenge - -The DNS challenge is perhaps the most useful challenge because it allows you to obtain certificates without your server needing to be publicly accessible on the Internet, and it's the only challenge by which Let's Encrypt will issue wildcard certificates. - -This challenge works by setting a special record in the domain's zone. To do this automatically, your DNS provider needs to offer an API by which changes can be made to domain names, and the changes need to take effect immediately for best results. CertMagic supports [all of lego's DNS provider implementations](https://github.com/xenolf/lego/tree/master/providers/dns)! All of them clean up the temporary record after the challenge completes. - -To enable it, just set the `DNSProvider` field on a `certmagic.Config` struct, or set the default `certmagic.DNSProvider` variable. For example, if my domains' DNS was served by DNSimple and I set my DNSimple API credentials in environment variables: - -```go -import "github.com/xenolf/lego/providers/dns/dnsimple" - -provider, err := dnsimple.NewDNSProvider() -if err != nil { - return err -} - -certmagic.DNSProvider = provider -``` - -Now the DNS challenge will be used by default, and I can obtain certificates for wildcard domains. See the [godoc documentation for the provider you're using](https://godoc.org/github.com/xenolf/lego/providers/dns#pkg-subdirectories) to learn how to configure it. Most can be configured by env variables or by passing in a config struct. If you pass a config struct instead of using env variables, you will probably need to set some other defaults (that's just how lego works, currently): - -```go -PropagationTimeout: dns01.DefaultPollingInterval, -PollingInterval: dns01.DefaultPollingInterval, -TTL: dns01.DefaultTTL, -``` - -Enabling the DNS challenge disables the other challenges for that `certmagic.Config` instance. - - -## On-Demand TLS - -Normally, certificates are obtained and renewed before a listener starts serving, and then those certificates are maintained throughout the lifetime of the program. In other words, the certificate names are static. But sometimes you don't know all the names ahead of time. This is where On-Demand TLS shines. - -Originally invented for use in Caddy (which was the first program to use such technology), On-Demand TLS makes it possible and easy to serve certificates for arbitrary names during the lifetime of the server. 
When a TLS handshake is received, CertMagic will read the Server Name Indication (SNI) value and either load and present that certificate in the ServerHello, or if one does not exist, it will obtain it from a CA right then-and-there. - -Of course, this has some obvious security implications. You don't want to DoS a CA or allow arbitrary clients to fill your storage with spammy TLS handshakes. That's why, in order to enable On-Demand issuance, you'll need to set some limits or some policy to allow getting a certificate. - -CertMagic provides several ways to enforce decision policies for On-Demand TLS, in descending order of priority: - -- A generic function that you write which will decide whether to allow the certificate request -- A name whitelist -- The ability to make an HTTP request to a URL for permission -- Rate limiting - -The simplest way to enable On-Demand issuance is to set the OnDemand field of a Config (or the default package-level value): - -```go -certmagic.OnDemand = &certmagic.OnDemandConfig{MaxObtain: 5} -``` - -This allows only 5 certificates to be requested and is the simplest way to enable On-Demand TLS, but is the least recommended. It prevents abuse, but only in the least helpful way. - -The [godoc](https://godoc.org/github.com/mholt/certmagic#OnDemandConfig) describes how to use the other policies, all of which are much more recommended! :) - -If `OnDemand` is set and `Manage()` is called, then the names given to `Manage()` will be whitelisted rather than obtained right away. - - -## Storage - -CertMagic relies on storage to store certificates and other TLS assets (OCSP staple cache, coordinating locks, etc). Persistent storage is a requirement when using CertMagic: ephemeral storage will likely lead to rate limiting on the CA-side as CertMagic will always have to get new certificates. - -By default, CertMagic stores assets on the local file system in `$HOME/.local/share/certmagic` (and honors `$XDG_DATA_HOME` if set). CertMagic will create the directory if it does not exist. If writes are denied, things will not be happy, so make sure CertMagic can write to it! - -The notion of a "cluster" or "fleet" of instances that may be serving the same site and sharing certificates, etc, is tied to storage. Simply, any instances that use the same storage facilities are considered part of the cluster. So if you deploy 100 instances of CertMagic behind a load balancer, they are all part of the same cluster if they share the same storage configuration. Sharing storage could be mounting a shared folder, or implementing some other distributed storage system such as a database server or KV store. - -The easiest way to change the storage being used is to set `certmagic.DefaultStorage` to a value that satisfies the [Storage interface](https://godoc.org/github.com/mholt/certmagic#Storage). Keep in mind that a valid `Storage` must be able to implement some operations atomically in order to provide locking and synchronization. - -If you write a Storage implementation, let us know and we'll add it to the project so people can find it! - - -## Cache - -All of the certificates in use are de-duplicated and cached in memory for optimal performance at handshake-time. This cache must be backed by persistent storage as described above. - -Most applications will not need to interact with certificate caches directly. Usually, the closest you will come is to set the package-wide `certmagic.DefaultStorage` variable (before attempting to create any Configs). 
However, if your use case requires using different storage facilities for different Configs (that's highly unlikely and NOT recommended! Even Caddy doesn't get that crazy), you will need to call `certmagic.NewCache()` and pass in the storage you want to use, then get new `Config` structs with `certmagic.NewWithCache()` and pass in the cache. - -Again, if you're needing to do this, you've probably over-complicated your application design. - - -## FAQ - -### Can I use some of my own certificates while using CertMagic? - -Yes, just call the relevant method on the `Config` to add your own certificate to the cache: - -- [`CacheUnmanagedCertificatePEMBytes()`](https://godoc.org/github.com/mholt/certmagic#Config.CacheUnmanagedCertificatePEMBytes) -- [`CacheUnmanagedCertificatePEMFile()`](https://godoc.org/github.com/mholt/certmagic#Config.CacheUnmanagedCertificatePEMFile) -- [`CacheUnmanagedTLSCertificate()`](https://godoc.org/github.com/mholt/certmagic#Config.CacheUnmanagedTLSCertificate) - -Keep in mind that unmanaged certificates are (obviously) not renewed for you, so you'll have to replace them when you do. However, OCSP stapling is performed even for unmanaged certificates that qualify. - - -### Does CertMagic obtain SAN certificates? - -Technically all certificates these days are SAN certificates because CommonName is deprecated. But if you're asking whether CertMagic issues and manages certificates with multiple SANs, the answer is no. But it does support serving them, if you provide your own. - - -### How can I listen on ports 80 and 443? Do I have to run as root? - -On Linux, you can use `setcap` to grant your binary the permission to bind low ports: - -```bash -$ sudo setcap cap_net_bind_service=+ep /path/to/your/binary -``` - -and then you will not need to run with root privileges. - - -## Contributing - -We welcome your contributions! Please see our **[contributing guidelines](https://github.com/mholt/certmagic/blob/master/.github/CONTRIBUTING.md)** for instructions. - - -## Project History - -CertMagic is the core of Caddy's advanced TLS automation code, extracted into a library. The underlying ACME client implementation is [lego](https://github.com/xenolf/lego), which was originally developed for use in Caddy even before Let's Encrypt entered public beta in 2015. - -In the years since then, Caddy's TLS automation techniques have been widely adopted, tried and tested in production, and served millions of sites and secured trillions of connections. - -Now, CertMagic is _the actual library used by Caddy_. It's incredibly powerful and feature-rich, but also easy to use for simple Go programs: one line of code can enable fully-automated HTTPS applications with HTTP->HTTPS redirects. - -Caddy is known for its robust HTTPS+ACME features. When ACME certificate authorities have had outages, in some cases Caddy was the only major client that didn't experience any downtime. Caddy can weather OCSP outages lasting days, or CA outages lasting weeks, without taking your sites offline. - -Caddy was also the first to sport "on-demand" issuance technology, which obtains certificates during the first TLS handshake for an allowed SNI name. - -Consequently, CertMagic brings all these (and more) features and capabilities right into your own Go programs. 
- -You can [watch a 2016 dotGo talk](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) by the author of this library about using ACME to automate certificate management in Go programs: - -[![Matthew Holt speaking at dotGo 2016 about ACME in Go](https://user-images.githubusercontent.com/1128849/49921557-2d506780-fe6b-11e8-97bf-6053b6b4eb48.png)](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) - - - -## Credits and License - -CertMagic is a project by [Matthew Holt](https://twitter.com/mholt6), who is the author; and various contributors, who are credited in the commit history of either CertMagic or Caddy. - -CertMagic is licensed under Apache 2.0, an open source license. For convenience, its main points are summarized as follows (but this is no replacement for the actual license text): - -- The author owns the copyright to this code -- Use, distribute, and modify the software freely -- Private and internal use is allowed -- License text and copyright notices must stay intact and be included with distributions -- Any and all changes to the code must be documented diff --git a/vendor/github.com/mholt/certmagic/appveyor.yml b/vendor/github.com/mholt/certmagic/appveyor.yml deleted file mode 100644 index 9178e0185..000000000 --- a/vendor/github.com/mholt/certmagic/appveyor.yml +++ /dev/null @@ -1,31 +0,0 @@ -version: "{build}" - -clone_folder: c:\gopath\src\github.com\mholt\certmagic - -environment: - GOPATH: c:\gopath - -stack: go 1.11 - -install: - - set PATH=%GOPATH%\bin;%PATH% - - set PATH=C:\msys64\mingw64\bin;%PATH% - - go get -t ./... - - go get golang.org/x/lint/golint - - go get github.com/alecthomas/gometalinter - -build: off - -before_test: - - go version - - go env - -test_script: - - gometalinter --install - - gometalinter --disable-all -E vet -E gofmt -E misspell -E ineffassign -E goimports -E deadcode --tests ./... - - go test -race ./... - -after_test: - - golint ./... 
- -deploy: off diff --git a/vendor/github.com/mholt/certmagic/go.mod b/vendor/github.com/mholt/certmagic/go.mod deleted file mode 100644 index 15e9721ce..000000000 --- a/vendor/github.com/mholt/certmagic/go.mod +++ /dev/null @@ -1,14 +0,0 @@ -module github.com/mholt/certmagic - -require ( - github.com/klauspost/cpuid v1.2.0 - github.com/miekg/dns v1.1.3 // indirect - github.com/stretchr/testify v1.3.0 // indirect - github.com/xenolf/lego v2.1.0+incompatible - golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b - golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 // indirect - golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect - golang.org/x/sys v0.0.0-20190124100055-b90733256f2e // indirect - golang.org/x/text v0.3.0 // indirect - gopkg.in/square/go-jose.v2 v2.2.2 // indirect -) diff --git a/vendor/github.com/mholt/certmagic/go.sum b/vendor/github.com/mholt/certmagic/go.sum deleted file mode 100644 index 950bd3545..000000000 --- a/vendor/github.com/mholt/certmagic/go.sum +++ /dev/null @@ -1,25 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/miekg/dns v1.1.3 h1:1g0r1IvskvgL8rR+AcHzUA+oFmGcQlaIm4IqakufeMM= -github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/xenolf/lego v2.1.0+incompatible h1:zZErna+4KHeBsUC3mw6gthaXncPDoBuFJOHKCRl64Wg= -github.com/xenolf/lego v2.1.0+incompatible/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b h1:Elez2XeF2p9uyVj0yEUDqQ56NFcDtcBNkYP7yv8YbUE= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e h1:3GIlrlVLfkoipSReOMNAgApI0ajnalyLa/EZHHca/XI= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= diff --git a/vendor/github.com/miekg/dns/.codecov.yml b/vendor/github.com/miekg/dns/.codecov.yml deleted file mode 100644 index f91e5c1fe..000000000 --- a/vendor/github.com/miekg/dns/.codecov.yml +++ 
/dev/null @@ -1,8 +0,0 @@ -coverage: - status: - project: - default: - target: 40% - threshold: null - patch: false - changes: false diff --git a/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/miekg/dns/.gitignore deleted file mode 100644 index 776cd950c..000000000 --- a/vendor/github.com/miekg/dns/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.6 -tags -test.out -a.out diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml deleted file mode 100644 index 013b5ae44..000000000 --- a/vendor/github.com/miekg/dns/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go -sudo: false - -go: - - 1.10.x - - 1.11.x - - 1.12.x - - tip - -before_install: - # don't use the miekg/dns when testing forks - - mkdir -p $GOPATH/src/github.com/miekg - - ln -s $TRAVIS_BUILD_DIR $GOPATH/src/github.com/miekg/ || true - -script: - - go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./... - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/miekg/dns/Gopkg.lock b/vendor/github.com/miekg/dns/Gopkg.lock deleted file mode 100644 index 686632207..000000000 --- a/vendor/github.com/miekg/dns/Gopkg.lock +++ /dev/null @@ -1,57 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - digest = "1:6914c49eed986dfb8dffb33516fa129c49929d4d873f41e073c83c11c372b870" - name = "golang.org/x/crypto" - packages = [ - "ed25519", - "ed25519/internal/edwards25519", - ] - pruneopts = "" - revision = "e3636079e1a4c1f337f212cc5cd2aca108f6c900" - -[[projects]] - branch = "master" - digest = "1:08e41d63f8dac84d83797368b56cf0b339e42d0224e5e56668963c28aec95685" - name = "golang.org/x/net" - packages = [ - "bpf", - "context", - "internal/iana", - "internal/socket", - "ipv4", - "ipv6", - ] - pruneopts = "" - revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de" - -[[projects]] - branch = "master" - digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1" - name = "golang.org/x/sync" - packages = ["errgroup"] - pruneopts = "" - revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" - -[[projects]] - branch = "master" - digest = "1:149a432fabebb8221a80f77731b1cd63597197ded4f14af606ebe3a0959004ec" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "" - revision = "e4b3c5e9061176387e7cea65e4dc5853801f3fb7" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "golang.org/x/crypto/ed25519", - "golang.org/x/net/ipv4", - "golang.org/x/net/ipv6", - "golang.org/x/sync/errgroup", - "golang.org/x/sys/unix", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/miekg/dns/Gopkg.toml b/vendor/github.com/miekg/dns/Gopkg.toml deleted file mode 100644 index 85e6ff31b..000000000 --- a/vendor/github.com/miekg/dns/Gopkg.toml +++ /dev/null @@ -1,38 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "golang.org/x/crypto" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - branch = "master" - name = "golang.org/x/sys" - -[[constraint]] - branch = "master" - name = "golang.org/x/sync" diff --git a/vendor/github.com/miekg/dns/Makefile.fuzz b/vendor/github.com/miekg/dns/Makefile.fuzz deleted file mode 100644 index dc158c4ac..000000000 --- a/vendor/github.com/miekg/dns/Makefile.fuzz +++ /dev/null @@ -1,33 +0,0 @@ -# Makefile for fuzzing -# -# Use go-fuzz and needs the tools installed. -# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/ -# -# Installing go-fuzz: -# $ make -f Makefile.fuzz get -# Installs: -# * github.com/dvyukov/go-fuzz/go-fuzz -# * get github.com/dvyukov/go-fuzz/go-fuzz-build - -all: build - -.PHONY: build -build: - go-fuzz-build -tags fuzz github.com/miekg/dns - -.PHONY: build-newrr -build-newrr: - go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns - -.PHONY: fuzz -fuzz: - go-fuzz -bin=dns-fuzz.zip -workdir=fuzz - -.PHONY: get -get: - go get github.com/dvyukov/go-fuzz/go-fuzz - go get github.com/dvyukov/go-fuzz/go-fuzz-build - -.PHONY: clean -clean: - rm *-fuzz.zip diff --git a/vendor/github.com/miekg/dns/Makefile.release b/vendor/github.com/miekg/dns/Makefile.release deleted file mode 100644 index 8fb748e8a..000000000 --- a/vendor/github.com/miekg/dns/Makefile.release +++ /dev/null @@ -1,52 +0,0 @@ -# Makefile for releasing. -# -# The release is controlled from version.go. The version found there is -# used to tag the git repo, we're not building any artifects so there is nothing -# to upload to github. 
-# -# * Up the version in version.go -# * Run: make -f Makefile.release release -# * will *commit* your change with 'Release $VERSION' -# * push to github -# - -define GO -//+build ignore - -package main - -import ( - "fmt" - - "github.com/miekg/dns" -) - -func main() { - fmt.Println(dns.Version.String()) -} -endef - -$(file > version_release.go,$(GO)) -VERSION:=$(shell go run version_release.go) -TAG="v$(VERSION)" - -all: - @echo Use the \'release\' target to start a release $(VERSION) - rm -f version_release.go - -.PHONY: release -release: commit push - @echo Released $(VERSION) - rm -f version_release.go - -.PHONY: commit -commit: - @echo Committing release $(VERSION) - git commit -am"Release $(VERSION)" - git tag $(TAG) - -.PHONY: push -push: - @echo Pushing release $(VERSION) to master - git push --tags - git push diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md deleted file mode 100644 index dc2a8228f..000000000 --- a/vendor/github.com/miekg/dns/README.md +++ /dev/null @@ -1,172 +0,0 @@ -[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) -[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns) -[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns) - -# Alternative (more granular) approach to a DNS library - -> Less is more. - -Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types. -It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer there -isn't a convenience function for it. Server side and client side programming is supported, i.e. you -can build servers and resolvers with it. - -We try to keep the "master" branch as sane as possible and at the bleeding edge of standards, -avoiding breaking changes wherever reasonable. We support the last two versions of Go. - -# Goals - -* KISS; -* Fast; -* Small API. If it's easy to code in Go, don't make a function for it. 
- -# Users - -A not-so-up-to-date-list-that-may-be-actually-current: - -* https://github.com/coredns/coredns -* https://cloudflare.com -* https://github.com/abh/geodns -* http://www.statdns.com/ -* http://www.dnsinspect.com/ -* https://github.com/chuangbo/jianbing-dictionary-dns -* http://www.dns-lg.com/ -* https://github.com/fcambus/rrda -* https://github.com/kenshinx/godns -* https://github.com/skynetservices/skydns -* https://github.com/hashicorp/consul -* https://github.com/DevelopersPL/godnsagent -* https://github.com/duedil-ltd/discodns -* https://github.com/StalkR/dns-reverse-proxy -* https://github.com/tianon/rawdns -* https://mesosphere.github.io/mesos-dns/ -* https://pulse.turbobytes.com/ -* https://github.com/fcambus/statzone -* https://github.com/benschw/dns-clb-go -* https://github.com/corny/dnscheck for -* https://namesmith.io -* https://github.com/miekg/unbound -* https://github.com/miekg/exdns -* https://dnslookup.org -* https://github.com/looterz/grimd -* https://github.com/phamhongviet/serf-dns -* https://github.com/mehrdadrad/mylg -* https://github.com/bamarni/dockness -* https://github.com/fffaraz/microdns -* http://kelda.io -* https://github.com/ipdcode/hades -* https://github.com/StackExchange/dnscontrol/ -* https://www.dnsperf.com/ -* https://dnssectest.net/ -* https://dns.apebits.com -* https://github.com/oif/apex -* https://github.com/jedisct1/dnscrypt-proxy -* https://github.com/jedisct1/rpdns -* https://github.com/xor-gate/sshfp -* https://github.com/rs/dnstrace -* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss)) -* https://github.com/semihalev/sdns -* https://render.com -* https://github.com/peterzen/goresolver - -Send pull request if you want to be listed here. - -# Features - -* UDP/TCP queries, IPv4 and IPv6 -* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported -* Fast -* Server side programming (mimicking the net/http package) -* Client side programming -* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519 -* EDNS0, NSID, Cookies -* AXFR/IXFR -* TSIG, SIG(0) -* DNS over TLS (DoT): encrypted connection between client and server over TCP -* DNS name compression - -Have fun! - -Miek Gieben - 2010-2012 - -DNS Authors 2012- - -# Building - -Building is done with the `go` tool. If you have setup your GOPATH correctly, the following should -work: - - go get github.com/miekg/dns - go build github.com/miekg/dns - -## Examples - -A short "how to use the API" is at the beginning of doc.go (this also will show when you call `godoc -github.com/miekg/dns`). - -Example programs can be found in the `github.com/miekg/exdns` repository. 
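As a quick orientation before the RFC list, a minimal client-side query with this library might look like the sketch below (not an excerpt from doc.go; the resolver address is just a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Ask for the A records of example.com.
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

	// Exchange the query with a resolver (placeholder address).
	c := new(dns.Client)
	in, _, err := c.Exchange(m, "8.8.8.8:53")
	if err != nil {
		log.Fatal(err)
	}
	for _, rr := range in.Answer {
		fmt.Println(rr.String())
	}
}
```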
- -## Supported RFCs - -*all of them* - -* 103{4,5} - DNS standard -* 1348 - NSAP record (removed the record) -* 1982 - Serial Arithmetic -* 1876 - LOC record -* 1995 - IXFR -* 1996 - DNS notify -* 2136 - DNS Update (dynamic updates) -* 2181 - RRset definition - there is no RRset type though, just []RR -* 2537 - RSAMD5 DNS keys -* 2065 - DNSSEC (updated in later RFCs) -* 2671 - EDNS record -* 2782 - SRV record -* 2845 - TSIG record -* 2915 - NAPTR record -* 2929 - DNS IANA Considerations -* 3110 - RSASHA1 DNS keys -* 3225 - DO bit (DNSSEC OK) -* 340{1,2,3} - NAPTR record -* 3445 - Limiting the scope of (DNS)KEY -* 3597 - Unknown RRs -* 403{3,4,5} - DNSSEC + validation functions -* 4255 - SSHFP record -* 4343 - Case insensitivity -* 4408 - SPF record -* 4509 - SHA256 Hash in DS -* 4592 - Wildcards in the DNS -* 4635 - HMAC SHA TSIG -* 4701 - DHCID -* 4892 - id.server -* 5001 - NSID -* 5155 - NSEC3 record -* 5205 - HIP record -* 5702 - SHA2 in the DNS -* 5936 - AXFR -* 5966 - TCP implementation recommendations -* 6605 - ECDSA -* 6725 - IANA Registry Update -* 6742 - ILNP DNS -* 6840 - Clarifications and Implementation Notes for DNS Security -* 6844 - CAA record -* 6891 - EDNS0 update -* 6895 - DNS IANA considerations -* 6975 - Algorithm Understanding in DNSSEC -* 7043 - EUI48/EUI64 records -* 7314 - DNS (EDNS) EXPIRE Option -* 7477 - CSYNC RR -* 7828 - edns-tcp-keepalive EDNS0 Option -* 7553 - URI record -* 7858 - DNS over TLS: Initiation and Performance Considerations -* 7871 - EDNS0 Client Subnet -* 7873 - Domain Name System (DNS) Cookies -* 8080 - EdDSA for DNSSEC -* 8499 - DNS Terminology - -## Loosely Based Upon - -* ldns - -* NSD - -* Net::DNS - -* GRONG - diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md deleted file mode 100644 index d70706d5b..000000000 --- a/vendor/github.com/mitchellh/go-homedir/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# go-homedir - -This is a Go library for detecting the user's home directory without -the use of cgo, so the library can be used in cross-compilation environments. - -Usage is incredibly simple, just call `homedir.Dir()` to get the home directory -for a user, and `homedir.Expand()` to expand the `~` in a path to the home -directory. - -**Why not just use `os/user`?** The built-in `os/user` package requires -cgo on Darwin systems. This means that any Go code that uses that package -cannot cross compile. But 99% of the time the use for `os/user` is just to -retrieve the home directory, which we can do for the current user without -cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod deleted file mode 100644 index 7efa09a04..000000000 --- a/vendor/github.com/mitchellh/go-homedir/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/mitchellh/go-homedir diff --git a/vendor/github.com/modern-go/concurrent/.gitignore b/vendor/github.com/modern-go/concurrent/.gitignore deleted file mode 100644 index 3f2bc4741..000000000 --- a/vendor/github.com/modern-go/concurrent/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/coverage.txt diff --git a/vendor/github.com/modern-go/concurrent/.travis.yml b/vendor/github.com/modern-go/concurrent/.travis.yml deleted file mode 100644 index 449e67cd0..000000000 --- a/vendor/github.com/modern-go/concurrent/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.8.x - - 1.x - -before_install: - - go get -t -v ./... 
- -script: - - ./test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/modern-go/concurrent/README.md b/vendor/github.com/modern-go/concurrent/README.md deleted file mode 100644 index acab3200a..000000000 --- a/vendor/github.com/modern-go/concurrent/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# concurrent - -[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/concurrent/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/concurrent?badge) -[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/concurrent) -[![Build Status](https://travis-ci.org/modern-go/concurrent.svg?branch=master)](https://travis-ci.org/modern-go/concurrent) -[![codecov](https://codecov.io/gh/modern-go/concurrent/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/concurrent) -[![rcard](https://goreportcard.com/badge/github.com/modern-go/concurrent)](https://goreportcard.com/report/github.com/modern-go/concurrent) -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/concurrent/master/LICENSE) - -* concurrent.Map: backport sync.Map for go below 1.9 -* concurrent.Executor: goroutine with explicit ownership and cancellable - -# concurrent.Map - -because sync.Map is only available in go 1.9, we can use concurrent.Map to make code portable - -```go -m := concurrent.NewMap() -m.Store("hello", "world") -elem, found := m.Load("hello") -// elem will be "world" -// found will be true -``` - -# concurrent.Executor - -```go -executor := concurrent.NewUnboundedExecutor() -executor.Go(func(ctx context.Context) { - everyMillisecond := time.NewTicker(time.Millisecond) - for { - select { - case <-ctx.Done(): - fmt.Println("goroutine exited") - return - case <-everyMillisecond.C: - // do something - } - } -}) -time.Sleep(time.Second) -executor.StopAndWaitForever() -fmt.Println("executor stopped") -``` - -attach goroutine to executor instance, so that we can - -* cancel it by stop the executor with Stop/StopAndWait/StopAndWaitForever -* handle panic by callback: the default behavior will no longer crash your application \ No newline at end of file diff --git a/vendor/github.com/modern-go/concurrent/test.sh b/vendor/github.com/modern-go/concurrent/test.sh deleted file mode 100755 index d1e6b2ec5..000000000 --- a/vendor/github.com/modern-go/concurrent/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list ./... | grep -v vendor); do - go test -coverprofile=profile.out -coverpkg=github.com/modern-go/concurrent $d - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/modern-go/reflect2/.gitignore b/vendor/github.com/modern-go/reflect2/.gitignore deleted file mode 100644 index 7b26c946d..000000000 --- a/vendor/github.com/modern-go/reflect2/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/vendor -/coverage.txt diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml deleted file mode 100644 index fbb43744d..000000000 --- a/vendor/github.com/modern-go/reflect2/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.8.x - - 1.x - -before_install: - - go get -t -v ./... - - go get -t -v github.com/modern-go/reflect2-tests/... 
- -script: - - ./test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock deleted file mode 100644 index 2a3a69893..000000000 --- a/vendor/github.com/modern-go/reflect2/Gopkg.lock +++ /dev/null @@ -1,15 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" - version = "1.0.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml deleted file mode 100644 index 2f4f4dbdc..000000000 --- a/vendor/github.com/modern-go/reflect2/Gopkg.toml +++ /dev/null @@ -1,35 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - -ignored = [] - -[[constraint]] - name = "github.com/modern-go/concurrent" - version = "1.0.0" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/github.com/modern-go/reflect2/README.md b/vendor/github.com/modern-go/reflect2/README.md deleted file mode 100644 index 6f968aab9..000000000 --- a/vendor/github.com/modern-go/reflect2/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# reflect2 - -[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/reflect2/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/reflect2?badge) -[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/reflect2) -[![Build Status](https://travis-ci.org/modern-go/reflect2.svg?branch=master)](https://travis-ci.org/modern-go/reflect2) -[![codecov](https://codecov.io/gh/modern-go/reflect2/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/reflect2) -[![rcard](https://goreportcard.com/badge/github.com/modern-go/reflect2)](https://goreportcard.com/report/github.com/modern-go/reflect2) -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/reflect2/master/LICENSE) - -reflect api that avoids runtime reflect.Value cost - -* reflect get/set interface{}, with type checking -* reflect get/set unsafe.Pointer, without type checking -* `reflect2.TypeByName` works like `Class.forName` found in java - -[json-iterator](https://github.com/json-iterator/go) use this package to save runtime dispatching cost. -This package is designed for low level libraries to optimize reflection performance. -General application should still use reflect standard library. - -# reflect2.TypeByName - -```go -// given package is github.com/your/awesome-package -type MyStruct struct { - // ... 
-} - -// will return the type -reflect2.TypeByName("awesome-package.MyStruct") -// however, if the type has not been used -// it will be eliminated by compiler, so we can not get it in runtime -``` - -# reflect2 get/set interface{} - -```go -valType := reflect2.TypeOf(1) -i := 1 -j := 10 -valType.Set(&i, &j) -// i will be 10 -``` - -to get set `type`, always use its pointer `*type` - -# reflect2 get/set unsafe.Pointer - -```go -valType := reflect2.TypeOf(1) -i := 1 -j := 10 -valType.UnsafeSet(unsafe.Pointer(&i), unsafe.Pointer(&j)) -// i will be 10 -``` - -to get set `type`, always use its pointer `*type` - -# benchmark - -Benchmark is not necessary for this package. It does nothing actually. -As it is just a thin wrapper to make go runtime public. -Both `reflect2` and `reflect` call same function -provided by `runtime` package exposed by go language. - -# unsafe safety - -Instead of casting `[]byte` to `sliceHeader` in your application using unsafe. -We can use reflect2 instead. This way, if `sliceHeader` changes in the future, -only reflect2 need to be upgraded. - -reflect2 tries its best to keep the implementation same as reflect (by testing). \ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh deleted file mode 100755 index 3d2b9768c..000000000 --- a/vendor/github.com/modern-go/reflect2/test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do - go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go index 3acfb5580..6d489112f 100644 --- a/vendor/github.com/modern-go/reflect2/type_map.go +++ b/vendor/github.com/modern-go/reflect2/type_map.go @@ -4,7 +4,6 @@ import ( "reflect" "runtime" "strings" - "sync" "unsafe" ) @@ -16,17 +15,10 @@ func typelinks1() [][]unsafe.Pointer //go:linkname typelinks2 reflect.typelinks func typelinks2() (sections []unsafe.Pointer, offset [][]int32) -// initOnce guards initialization of types and packages -var initOnce sync.Once - -var types map[string]reflect.Type -var packages map[string]map[string]reflect.Type - -// discoverTypes initializes types and packages -func discoverTypes() { - types = make(map[string]reflect.Type) - packages = make(map[string]map[string]reflect.Type) +var types = map[string]reflect.Type{} +var packages = map[string]map[string]reflect.Type{} +func init() { ver := runtime.Version() if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { loadGo15Types() @@ -98,13 +90,11 @@ type emptyInterface struct { // TypeByName return the type by its name, just like Class.forName in java func TypeByName(typeName string) Type { - initOnce.Do(discoverTypes) return Type2(types[typeName]) } // TypeByPackageName return the type by its package and name func TypeByPackageName(pkgPath string, name string) Type { - initOnce.Do(discoverTypes) pkgTypes := packages[pkgPath] if pkgTypes == nil { return nil diff --git a/vendor/github.com/naoina/go-stringutil/.travis.yml b/vendor/github.com/naoina/go-stringutil/.travis.yml deleted file mode 100644 index 0489ad5ea..000000000 --- a/vendor/github.com/naoina/go-stringutil/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.4 - - 1.5 - - tip -install: - - go get -v 
github.com/naoina/go-stringutil -script: - - go test -v -bench . -benchmem ./... diff --git a/vendor/github.com/naoina/go-stringutil/README.md b/vendor/github.com/naoina/go-stringutil/README.md deleted file mode 100644 index ecf7a5fae..000000000 --- a/vendor/github.com/naoina/go-stringutil/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# stringutil [![Build Status](https://travis-ci.org/naoina/go-stringutil.svg?branch=master)](https://travis-ci.org/naoina/go-stringutil) - -## Installation - - go get -u github.com/naoina/go-stringutil - -## Documentation - -See https://godoc.org/github.com/naoina/go-stringutil - -## License - -MIT diff --git a/vendor/github.com/naoina/toml/.travis.yml b/vendor/github.com/naoina/toml/.travis.yml deleted file mode 100644 index 7b7551caa..000000000 --- a/vendor/github.com/naoina/toml/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go - -go: - - 1.3 - - 1.x - -install: - - go get -t -v ./... - -script: - - go test ./... diff --git a/vendor/github.com/naoina/toml/README.md b/vendor/github.com/naoina/toml/README.md deleted file mode 100644 index 1c0143348..000000000 --- a/vendor/github.com/naoina/toml/README.md +++ /dev/null @@ -1,392 +0,0 @@ -# TOML parser and encoder library for Golang [![Build Status](https://travis-ci.org/naoina/toml.png?branch=master)](https://travis-ci.org/naoina/toml) - -[TOML](https://github.com/toml-lang/toml) parser and encoder library for [Golang](http://golang.org/). - -This library is compatible with TOML version [v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md). - -## Installation - - go get -u github.com/naoina/toml - -## Usage - -The following TOML save as `example.toml`. - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Lance Uppercut" -dob = 1979-05-27T07:32:00-08:00 # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -Then above TOML will mapping to `tomlConfig` struct using `toml.Unmarshal`. - -```go -package main - -import ( - "io/ioutil" - "os" - "time" - - "github.com/naoina/toml" -) - -type tomlConfig struct { - Title string - Owner struct { - Name string - Dob time.Time - } - Database struct { - Server string - Ports []int - ConnectionMax uint - Enabled bool - } - Servers map[string]ServerInfo - Clients struct { - Data [][]interface{} - Hosts []string - } -} - -type ServerInfo struct { - IP net.IP - DC string -} - -func main() { - f, err := os.Open("example.toml") - if err != nil { - panic(err) - } - defer f.Close() - var config Config - if err := toml.NewDecoder(f).Decode(&config); err != nil { - panic(err) - } - - // then to use the unmarshaled config... - fmt.Println("IP of server 'alpha':", config.Servers["alpha"].IP) -} -``` - -## Mappings - -A key and value of TOML will map to the corresponding field. -The fields of struct for mapping must be exported. 
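
As a small, self-contained sketch of this mapping (using an inline document instead of `example.toml`, so the field set shown here is illustrative only):

```go
package main

import (
	"fmt"

	"github.com/naoina/toml"
)

// config declares only exported fields; unexported fields cannot be mapped.
type config struct {
	Title string
	Owner struct {
		Name string
	}
}

func main() {
	// A tiny inline TOML document; every key maps to an exported field.
	blob := []byte(`
title = "TOML Example"

[owner]
name = "Lance Uppercut"
`)
	var conf config
	if err := toml.Unmarshal(blob, &conf); err != nil {
		panic(err)
	}
	fmt.Println(conf.Title, conf.Owner.Name) // TOML Example Lance Uppercut
}
```

Here the key `title` maps to the exported `Title` field according to the matching rules described next.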
- -The rules of the mapping of key are following: - -#### Exact matching - -```toml -timeout_seconds = 256 -``` - -```go -type Config struct { - Timeout_seconds int -} -``` - -#### Camelcase matching - -```toml -server_name = "srv1" -``` - -```go -type Config struct { - ServerName string -} -``` - -#### Uppercase matching - -```toml -ip = "10.0.0.1" -``` - -```go -type Config struct { - IP string -} -``` - -See the following examples for the value mappings. - -### String - -```toml -val = "string" -``` - -```go -type Config struct { - Val string -} -``` - -### Integer - -```toml -val = 100 -``` - -```go -type Config struct { - Val int -} -``` - -All types that can be used are following: - -* int8 (from `-128` to `127`) -* int16 (from `-32768` to `32767`) -* int32 (from `-2147483648` to `2147483647`) -* int64 (from `-9223372036854775808` to `9223372036854775807`) -* int (same as `int32` on 32bit environment, or `int64` on 64bit environment) -* uint8 (from `0` to `255`) -* uint16 (from `0` to `65535`) -* uint32 (from `0` to `4294967295`) -* uint64 (from `0` to `18446744073709551615`) -* uint (same as `uint` on 32bit environment, or `uint64` on 64bit environment) - -### Float - -```toml -val = 3.1415 -``` - -```go -type Config struct { - Val float32 -} -``` - -All types that can be used are following: - -* float32 -* float64 - -### Boolean - -```toml -val = true -``` - -```go -type Config struct { - Val bool -} -``` - -### Datetime - -```toml -val = 2014-09-28T21:27:39Z -``` - -```go -type Config struct { - Val time.Time -} -``` - -### Array - -```toml -val = ["a", "b", "c"] -``` - -```go -type Config struct { - Val []string -} -``` - -Also following examples all can be mapped: - -```toml -val1 = [1, 2, 3] -val2 = [["a", "b"], ["c", "d"]] -val3 = [[1, 2, 3], ["a", "b", "c"]] -val4 = [[1, 2, 3], [["a", "b"], [true, false]]] -``` - -```go -type Config struct { - Val1 []int - Val2 [][]string - Val3 [][]interface{} - Val4 [][]interface{} -} -``` - -### Table - -```toml -[server] -type = "app" - - [server.development] - ip = "10.0.0.1" - - [server.production] - ip = "10.0.0.2" -``` - -```go -type Config struct { - Server map[string]Server -} - -type Server struct { - IP string -} -``` - -You can also use the following struct instead of map of struct. - -```go -type Config struct { - Server struct { - Development Server - Production Server - } -} - -type Server struct { - IP string -} -``` - -### Array of Tables - -```toml -[[fruit]] - name = "apple" - - [fruit.physical] - color = "red" - shape = "round" - - [[fruit.variety]] - name = "red delicious" - - [[fruit.variety]] - name = "granny smith" - -[[fruit]] - name = "banana" - - [[fruit.variety]] - name = "plantain" -``` - -```go -type Config struct { - Fruit []struct { - Name string - Physical struct { - Color string - Shape string - } - Variety []struct { - Name string - } - } -} -``` - -### Using the `encoding.TextUnmarshaler` interface - -Package toml supports `encoding.TextUnmarshaler` (and `encoding.TextMarshaler`). You can -use it to apply custom marshaling rules for certain types. The `UnmarshalText` method is -called with the value text found in the TOML input. TOML strings are passed unquoted. 
- -```toml -duration = "10s" -``` - -```go -import time - -type Duration time.Duration - -// UnmarshalText implements encoding.TextUnmarshaler -func (d *Duration) UnmarshalText(data []byte) error { - duration, err := time.ParseDuration(string(data)) - if err == nil { - *d = Duration(duration) - } - return err -} - -// MarshalText implements encoding.TextMarshaler -func (d Duration) MarshalText() ([]byte, error) { - return []byte(time.Duration(d).String()), nil -} - -type ConfigWithDuration struct { - Duration Duration -} -``` -### Using the `toml.UnmarshalerRec` interface - -You can also override marshaling rules specifically for TOML using the `UnmarshalerRec` -and `MarshalerRec` interfaces. These are useful if you want to control how structs or -arrays are handled. You can apply additional validation or set unexported struct fields. - -Note: `encoding.TextUnmarshaler` and `encoding.TextMarshaler` should be preferred for -simple (scalar) values because they're also compatible with other formats like JSON or -YAML. - -[See the UnmarshalerRec example](https://godoc.org/github.com/naoina/toml/#example_UnmarshalerRec). - -### Using the `toml.Unmarshaler` interface - -If you want to deal with raw TOML syntax, use the `Unmarshaler` and `Marshaler` -interfaces. Their input and output is raw TOML syntax. As such, these interfaces are -useful if you want to handle TOML at the syntax level. - -[See the Unmarshaler example](https://godoc.org/github.com/naoina/toml/#example_Unmarshaler). - -## API documentation - -See [Godoc](http://godoc.org/github.com/naoina/toml). - -## License - -MIT diff --git a/vendor/github.com/naoina/toml/parse.peg b/vendor/github.com/naoina/toml/parse.peg deleted file mode 100644 index dd9eb8880..000000000 --- a/vendor/github.com/naoina/toml/parse.peg +++ /dev/null @@ -1,147 +0,0 @@ -package toml - -type tomlParser Peg { - toml -} - -TOML <- Expression (newline Expression)* newline? !. { _ = buffer } - -Expression <- ( - { p.SetTableString(begin, end) } - / ws keyval ws comment? - / ws comment? - / ws -) - -newline <- <[\r\n]+> { p.AddLineCount(end - begin) } - -ws <- [ \t]* -wsnl <- ( - [ \t] - / <[\r\n]> { p.AddLineCount(end - begin) } -)* - -comment <- '#' <[\t -\0x10FFFF]*> - -keyval <- key ws '=' ws val { p.AddKeyValue() } - -key <- bareKey / quotedKey - -bareKey <- <[0-9A-Za-z\-_]+> { p.SetKey(p.buffer, begin, end) } - -quotedKey <- < '"' basicChar* '"' > { p.SetKey(p.buffer, begin, end) } - -val <- ( - { p.SetTime(begin, end) } - / { p.SetFloat64(begin, end) } - / { p.SetInt64(begin, end) } - / { p.SetString(begin, end) } - / { p.SetBool(begin, end) } - / { p.SetArray(begin, end) } - / inlineTable -) - -table <- stdTable / arrayTable - -stdTable <- '[' ws ws ']' { p.SetTable(p.buffer, begin, end) } - -arrayTable <- '[[' ws ws ']]' { p.SetArrayTable(p.buffer, begin, end) } - -inlineTable <- ( - '{' { p.StartInlineTable() } - ws inlineTableKeyValues ws - '}' { p.EndInlineTable() } -) - -inlineTableKeyValues <- (keyval inlineTableValSep?)* - -tableKey <- tableKeyComp (tableKeySep tableKeyComp)* - -tableKeyComp <- key { p.AddTableKey() } - -tableKeySep <- ws '.' ws - -inlineTableValSep <- ws ',' ws - -integer <- [\-+]? int -int <- [1-9] (digit / '_' digit)+ / digit - -float <- integer (frac exp? / frac? exp) -frac <- '.' digit (digit / '_' digit)* -exp <- [eE] [\-+]? 
digit (digit / '_' digit)* - -string <- ( - mlLiteralString - / literalString - / mlBasicString - / basicString -) - -basicString <- <'"' basicChar* '"'> { p.SetBasicString(p.buffer, begin, end) } - -basicChar <- basicUnescaped / escaped -escaped <- escape ([btnfr"/\\] / 'u' hexQuad / 'U' hexQuad hexQuad) - -basicUnescaped <- [ -!#-\[\]-\0x10FFFF] - -escape <- '\\' - -mlBasicString <- '"""' mlBasicBody '"""' { p.SetMultilineString() } - -mlBasicBody <- ( - { p.AddMultilineBasicBody(p.buffer, begin, end) } - / escape newline wsnl -)* - -literalString <- "'" "'" { p.SetLiteralString(p.buffer, begin, end) } - -literalChar <- [\t -&(-\0x10FFFF] - -mlLiteralString <- "'''" "'''" { p.SetMultilineLiteralString(p.buffer, begin, end) } - -mlLiteralBody <- (!"'''" (mlLiteralChar / newline))* - -mlLiteralChar <- [\t -\0x10FFFF] - -hexdigit <- [0-9A-Fa-f] -hexQuad <- hexdigit hexdigit hexdigit hexdigit - -boolean <- 'true' / 'false' - -dateFullYear <- digitQuad -dateMonth <- digitDual -dateMDay <- digitDual -timeHour <- digitDual -timeMinute <- digitDual -timeSecond <- digitDual -timeSecfrac <- '.' digit+ -timeNumoffset <- [\-+] timeHour ':' timeMinute -timeOffset <- 'Z' / timeNumoffset -partialTime <- timeHour ':' timeMinute ':' timeSecond timeSecfrac? -fullDate <- dateFullYear '-' dateMonth '-' dateMDay -fullTime <- partialTime timeOffset -datetime <- (fullDate ('T' fullTime)?) / partialTime - -digit <- [0-9] -digitDual <- digit digit -digitQuad <- digitDual digitDual - -array <- ( - '[' { p.StartArray() } - wsnl arrayValues? wsnl - ']' -) - -arrayValues <- ( - val { p.AddArrayVal() } - ( - wsnl comment? - wsnl arraySep - wsnl comment? - wsnl val { p.AddArrayVal() } - )* - wsnl arraySep? - wsnl comment? -) - -arraySep <- ',' diff --git a/vendor/github.com/onsi/ginkgo/LICENSE b/vendor/github.com/onsi/ginkgo/LICENSE new file mode 100644 index 000000000..9415ee72c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go new file mode 100644 index 000000000..dab2a2470 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -0,0 +1,200 @@ +/* +Ginkgo accepts a number of configuration options. 
+ +These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli) + +You can also learn more via + + ginkgo help + +or (I kid you not): + + go test -asdf +*/ +package config + +import ( + "flag" + "time" + + "fmt" +) + +const VERSION = "1.8.0" + +type GinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + RegexScansFilePath bool + FocusString string + SkipString string + SkipMeasurements bool + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + DebugParallel bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +var GinkgoConfig = GinkgoConfigType{} + +type DefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + NoisySkippings bool + Succinct bool + Verbose bool + FullTrace bool +} + +var DefaultReporterConfig = DefaultReporterConfigType{} + +func processPrefix(prefix string) string { + if prefix != "" { + prefix = prefix + "." + } + return prefix +} + +func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { + prefix = processPrefix(prefix) + flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") + flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.") + flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.") + flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.") + flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.") + + flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.") + + flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.") + flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.") + + flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).") + + flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.") + + flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.") + + flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.") + + if includeParallelFlags { + flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.") + flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. 
For running specs in parallel.") + flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.") + flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.") + } + + flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.") + flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.") + flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.") + flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.") + flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") + flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") + flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs") +} + +func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string { + prefix = processPrefix(prefix) + result := make([]string, 0) + + if ginkgo.RandomSeed > 0 { + result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed)) + } + + if ginkgo.RandomizeAllSpecs { + result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix)) + } + + if ginkgo.SkipMeasurements { + result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix)) + } + + if ginkgo.FailOnPending { + result = append(result, fmt.Sprintf("--%sfailOnPending", prefix)) + } + + if ginkgo.FailFast { + result = append(result, fmt.Sprintf("--%sfailFast", prefix)) + } + + if ginkgo.DryRun { + result = append(result, fmt.Sprintf("--%sdryRun", prefix)) + } + + if ginkgo.FocusString != "" { + result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString)) + } + + if ginkgo.SkipString != "" { + result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString)) + } + + if ginkgo.FlakeAttempts > 1 { + result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts)) + } + + if ginkgo.EmitSpecProgress { + result = append(result, fmt.Sprintf("--%sprogress", prefix)) + } + + if ginkgo.DebugParallel { + result = append(result, fmt.Sprintf("--%sdebug", prefix)) + } + + if ginkgo.ParallelNode != 0 { + result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode)) + } + + if ginkgo.ParallelTotal != 0 { + result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal)) + } + + if ginkgo.StreamHost != "" { + result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost)) + } + + if ginkgo.SyncHost != "" { + result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost)) + } + + if ginkgo.RegexScansFilePath { + result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix)) + } + + if reporter.NoColor { + result = append(result, fmt.Sprintf("--%snoColor", prefix)) + } + + if reporter.SlowSpecThreshold > 0 { + result = 
append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold)) + } + + if !reporter.NoisyPendings { + result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix)) + } + + if !reporter.NoisySkippings { + result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix)) + } + + if reporter.Verbose { + result = append(result, fmt.Sprintf("--%sv", prefix)) + } + + if reporter.Succinct { + result = append(result, fmt.Sprintf("--%ssuccinct", prefix)) + } + + if reporter.FullTrace { + result = append(result, fmt.Sprintf("--%strace", prefix)) + } + + return result +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go new file mode 100644 index 000000000..a6b96d88f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -0,0 +1,619 @@ +/* +Ginkgo is a BDD-style testing framework for Golang + +The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/ + +Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega) + +Ginkgo on Github: http://github.com/onsi/ginkgo + +Ginkgo is MIT-Licensed +*/ +package ginkgo + +import ( + "flag" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/remote" + "github.com/onsi/ginkgo/internal/suite" + "github.com/onsi/ginkgo/internal/testingtproxy" + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/reporters/stenographer" + colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" + "github.com/onsi/ginkgo/types" +) + +const GINKGO_VERSION = config.VERSION +const GINKGO_PANIC = ` +Your test failed. +Ginkgo panics to prevent subsequent assertions from running. +Normally Ginkgo rescues this panic so you shouldn't see it. + +But, if you make an assertion in a goroutine, Ginkgo can't capture the panic. +To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. +` +const defaultTimeout = 1 + +var globalSuite *suite.Suite +var globalFailer *failer.Failer + +func init() { + config.Flags(flag.CommandLine, "ginkgo", true) + GinkgoWriter = writer.New(os.Stdout) + globalFailer = failer.New() + globalSuite = suite.New(globalFailer) +} + +//GinkgoWriter implements an io.Writer +//When running in verbose mode any writes to GinkgoWriter will be immediately printed +//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen +//only if the current test fails. +var GinkgoWriter io.Writer + +//The interface by which Ginkgo receives *testing.T +type GinkgoTestingT interface { + Fail() +} + +//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is +//useful for seeding your own pseudorandom number generators (PRNGs) to ensure +//consistent executions from run to run, where your tests contain variability (for +//example, when selecting random test data). 
+func GinkgoRandomSeed() int64 { + return config.GinkgoConfig.RandomSeed +} + +//GinkgoParallelNode returns the parallel node number for the current ginkgo process +//The node number is 1-indexed +func GinkgoParallelNode() int { + return config.GinkgoConfig.ParallelNode +} + +//Some matcher libraries or legacy codebases require a *testing.T +//GinkgoT implements an interface analogous to *testing.T and can be used if +//the library in question accepts *testing.T through an interface +// +// For example, with testify: +// assert.Equal(GinkgoT(), 123, 123, "they should be equal") +// +// Or with gomock: +// gomock.NewController(GinkgoT()) +// +// GinkgoT() takes an optional offset argument that can be used to get the +// correct line number associated with the failure. +func GinkgoT(optionalOffset ...int) GinkgoTInterface { + offset := 3 + if len(optionalOffset) > 0 { + offset = optionalOffset[0] + } + return testingtproxy.New(GinkgoWriter, Fail, offset) +} + +//The interface returned by GinkgoT(). This covers most of the methods +//in the testing package's T. +type GinkgoTInterface interface { + Fail() + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Failed() bool + Parallel() + Skip(args ...interface{}) + Skipf(format string, args ...interface{}) + SkipNow() + Skipped() bool +} + +//Custom Ginkgo test reporters must implement the Reporter interface. +// +//The custom reporter is passed in a SuiteSummary when the suite begins and ends, +//and a SpecSummary just before a spec begins and just after a spec ends +type Reporter reporters.Reporter + +//Asynchronous specs are given a channel of the Done type. You must close or write to the channel +//to tell Ginkgo that your async test is done. +type Done chan<- interface{} + +//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription +// FullTestText: a concatenation of ComponentTexts and the TestText +// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test +// TestText: the text in the actual It or Measure node +// IsMeasurement: true if the current test is a measurement +// FileName: the name of the file containing the current test +// LineNumber: the line number for the current test +// Failed: if the current test has failed, this will be true (useful in an AfterEach) +type GinkgoTestDescription struct { + FullTestText string + ComponentTexts []string + TestText string + + IsMeasurement bool + + FileName string + LineNumber int + + Failed bool + Duration time.Duration +} + +//CurrentGinkgoTestDescripton returns information about the current running test. 
+func CurrentGinkgoTestDescription() GinkgoTestDescription { + summary, ok := globalSuite.CurrentRunningSpecSummary() + if !ok { + return GinkgoTestDescription{} + } + + subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1] + + return GinkgoTestDescription{ + ComponentTexts: summary.ComponentTexts[1:], + FullTestText: strings.Join(summary.ComponentTexts[1:], " "), + TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1], + IsMeasurement: summary.IsMeasurement, + FileName: subjectCodeLocation.FileName, + LineNumber: subjectCodeLocation.LineNumber, + Failed: summary.HasFailureState(), + Duration: summary.RunTime, + } +} + +//Measurement tests receive a Benchmarker. +// +//You use the Time() function to time how long the passed in body function takes to run +//You use the RecordValue() function to track arbitrary numerical measurements. +//The RecordValueWithPrecision() function can be used alternatively to provide the unit +//and resolution of the numeric measurement. +//The optional info argument is passed to the test reporter and can be used to +// provide the measurement data to a custom reporter with context. +// +//See http://onsi.github.io/ginkgo/#benchmark_tests for more details +type Benchmarker interface { + Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) + RecordValue(name string, value float64, info ...interface{}) + RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) +} + +//RunSpecs is the entry point for the Ginkgo test runner. +//You must call this within a Golang testing TestX(t *testing.T) function. +// +//To bootstrap a test suite you can use the Ginkgo CLI: +// +// ginkgo bootstrap +func RunSpecs(t GinkgoTestingT, description string) bool { + specReporters := []Reporter{buildDefaultReporter()} + return RunSpecsWithCustomReporters(t, description, specReporters) +} + +//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace +//RunSpecs() with this method. +func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { + specReporters = append(specReporters, buildDefaultReporter()) + return RunSpecsWithCustomReporters(t, description, specReporters) +} + +//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace +//RunSpecs() with this method. 
Note that parallel tests will not work correctly without the default reporter +func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { + writer := GinkgoWriter.(*writer.Writer) + writer.SetStream(config.DefaultReporterConfig.Verbose) + reporters := make([]reporters.Reporter, len(specReporters)) + for i, reporter := range specReporters { + reporters[i] = reporter + } + passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig) + if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Println("PASS | FOCUSED") + os.Exit(types.GINKGO_FOCUS_EXIT_CODE) + } + return passed +} + +func buildDefaultReporter() Reporter { + remoteReportingServer := config.GinkgoConfig.StreamHost + if remoteReportingServer == "" { + stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout()) + return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer) + } else { + debugFile := "" + if config.GinkgoConfig.DebugParallel { + debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode) + } + return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile) + } +} + +//Skip notifies Ginkgo that the current spec was skipped. +func Skip(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + globalFailer.Skip(message, codelocation.New(skip+1)) + panic(GINKGO_PANIC) +} + +//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.) +func Fail(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + + globalFailer.Fail(message, codelocation.New(skip+1)) + panic(GINKGO_PANIC) +} + +//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` +//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that +//calls out to Gomega +// +//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent +//further assertions from running. This panic must be recovered. Ginkgo does this for you +//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...) +// +//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no +//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine. +func GinkgoRecover() { + e := recover() + if e != nil { + globalFailer.Panic(codelocation.New(1), e) + } +} + +//Describe blocks allow you to organize your specs. A Describe block can contain any number of +//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. +// +//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally +//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object +//or method and, within that Describe, outline a number of Contexts and Whens. 
+func Describe(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) + return true +} + +//You can focus the tests within a describe block using FDescribe +func FDescribe(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using PDescribe +func PDescribe(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using XDescribe +func XDescribe(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + +//Context blocks allow you to organize your specs. A Context block can contain any number of +//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. +// +//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally +//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object +//or method and, within that Describe, outline a number of Contexts and Whens. +func Context(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) + return true +} + +//You can focus the tests within a describe block using FContext +func FContext(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using PContext +func PContext(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using XContext +func XContext(text string, body func()) bool { + globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + +//When blocks allow you to organize your specs. A When block can contain any number of +//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. +// +//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally +//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object +//or method and, within that Describe, outline a number of Contexts and Whens. +func When(text string, body func()) bool { + globalSuite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1)) + return true +} + +//You can focus the tests within a describe block using FWhen +func FWhen(text string, body func()) bool { + globalSuite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using PWhen +func PWhen(text string, body func()) bool { + globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + +//You can mark the tests within a describe block as pending using XWhen +func XWhen(text string, body func()) bool { + globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1)) + return true +} + +//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks +//within an It block. 
+// +//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a +//function that accepts a Done channel. When you do this, you can also provide an optional timeout. +func It(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can focus individual Its using FIt +func FIt(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can mark Its as pending using PIt +func PIt(text string, _ ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//You can mark Its as pending using XIt +func XIt(text string, _ ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//Specify blocks are aliases for It blocks and allow for more natural wording in situations +//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks +//which apply to It blocks. +func Specify(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can focus individual Specifys using FSpecify +func FSpecify(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can mark Specifys as pending using PSpecify +func PSpecify(text string, is ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//You can mark Specifys as pending using XSpecify +func XSpecify(text string, is ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//By allows you to better document large Its. +// +//Generally you should try to keep your Its short and to the point. This is not always possible, however, +//especially in the context of integration tests that capture a particular workflow. +// +//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...) +//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function. +func By(text string, callbacks ...func()) { + preamble := "\x1b[1mSTEP\x1b[0m" + if config.DefaultReporterConfig.NoColor { + preamble = "STEP" + } + fmt.Fprintln(GinkgoWriter, preamble+": "+text) + if len(callbacks) == 1 { + callbacks[0]() + } + if len(callbacks) > 1 { + panic("just one callback per By, please") + } +} + +//Measure blocks run the passed in body function repeatedly (determined by the samples argument) +//and accumulate metrics provided to the Benchmarker by the body function. 
+// +//The body function must have the signature: +// func(b Benchmarker) +func Measure(text string, body interface{}, samples int) bool { + globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples) + return true +} + +//You can focus individual Measures using FMeasure +func FMeasure(text string, body interface{}, samples int) bool { + globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples) + return true +} + +//You can mark Measurements as pending using PMeasure +func PMeasure(text string, _ ...interface{}) bool { + globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//You can mark Measurements as pending using XMeasure +func XMeasure(text string, _ ...interface{}) bool { + globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each +//parallel node process will call BeforeSuite. +// +//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel +// +//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. +func BeforeSuite(body interface{}, timeout ...float64) bool { + globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed. +//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting. +// +//When running in parallel, each parallel node process will call AfterSuite. +// +//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel +// +//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. +func AfterSuite(body interface{}, timeout ...float64) bool { + globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across +//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that +//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait +//until that node is done before running. +// +//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is +//run on all nodes, but *only* after the first function completes succesfully. Ginkgo also makes it possible to send data from the first function (on Node 1) +//to the second function (on all the other nodes). +// +//The functions have the following signatures. 
The first function (which only runs on node 1) has the signature: +// +// func() []byte +// +//or, to run asynchronously: +// +// func(done Done) []byte +// +//The byte array returned by the first function is then passed to the second function, which has the signature: +// +// func(data []byte) +// +//or, to run asynchronously: +// +// func(data []byte, done Done) +// +//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes: +// +// var dbClient db.Client +// var dbRunner db.Runner +// +// var _ = SynchronizedBeforeSuite(func() []byte { +// dbRunner = db.NewRunner() +// err := dbRunner.Start() +// Ω(err).ShouldNot(HaveOccurred()) +// return []byte(dbRunner.URL) +// }, func(data []byte) { +// dbClient = db.NewClient() +// err := dbClient.Connect(string(data)) +// Ω(err).ShouldNot(HaveOccurred()) +// }) +func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool { + globalSuite.SetSynchronizedBeforeSuiteNode( + node1Body, + allNodesBody, + codelocation.New(1), + parseTimeout(timeout...), + ) + return true +} + +//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up +//external singleton resources shared across nodes when running tests in parallel. +// +//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1 +//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until +//all other nodes are finished. +// +//Both functions have the same signature: either func() or func(done Done) to run asynchronously. +// +//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database +//only after all nodes have finished: +// +// var _ = SynchronizedAfterSuite(func() { +// dbClient.Cleanup() +// }, func() { +// dbRunner.Stop() +// }) +func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool { + globalSuite.SetSynchronizedAfterSuiteNode( + allNodesBody, + node1Body, + codelocation.New(1), + parseTimeout(timeout...), + ) + return true +} + +//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested +//Describe and Context blocks the outermost BeforeEach blocks are run first. +// +//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func BeforeEach(body interface{}, timeout ...float64) bool { + globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details, +//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) +// +//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func JustBeforeEach(body interface{}, timeout ...float64) bool { + globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. 
For more details, +//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) +// +//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func JustAfterEach(body interface{}, timeout ...float64) bool { + globalSuite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested +//Describe and Context blocks the innermost AfterEach blocks are run first. +// +//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func AfterEach(body interface{}, timeout ...float64) bool { + globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +func parseTimeout(timeout ...float64) time.Duration { + if len(timeout) == 0 { + return time.Duration(defaultTimeout * int64(time.Second)) + } else { + return time.Duration(timeout[0] * float64(time.Second)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go new file mode 100644 index 000000000..fa2f0bf73 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go @@ -0,0 +1,32 @@ +package codelocation + +import ( + "regexp" + "runtime" + "runtime/debug" + "strings" + + "github.com/onsi/ginkgo/types" +) + +func New(skip int) types.CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + stackTrace := PruneStack(string(debug.Stack()), skip) + return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} +} + +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + if !re.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go new file mode 100644 index 000000000..0737746dc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go @@ -0,0 +1,151 @@ +package containernode + +import ( + "math/rand" + "sort" + + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +type subjectOrContainerNode struct { + containerNode *ContainerNode + subjectNode leafnodes.SubjectNode +} + +func (n subjectOrContainerNode) text() string { + if n.containerNode != nil { + return n.containerNode.Text() + } else { + return n.subjectNode.Text() + } +} + +type CollatedNodes struct { + Containers []*ContainerNode + Subject leafnodes.SubjectNode +} + +type ContainerNode struct { + text string + flag types.FlagType + codeLocation types.CodeLocation + + setupNodes []leafnodes.BasicNode + subjectAndContainerNodes []subjectOrContainerNode +} + +func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode { + return &ContainerNode{ + text: text, + flag: flag, + codeLocation: codeLocation, + } +} + +func (container *ContainerNode) Shuffle(r *rand.Rand) { + sort.Sort(container) 
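+	// Sorting into a canonical (alphabetical-by-text) order first means the seeded permutation below produces the same shuffle for a given random seed.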
+ permutation := r.Perm(len(container.subjectAndContainerNodes)) + shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes)) + for i, j := range permutation { + shuffledNodes[i] = container.subjectAndContainerNodes[j] + } + container.subjectAndContainerNodes = shuffledNodes +} + +func (node *ContainerNode) BackPropagateProgrammaticFocus() bool { + if node.flag == types.FlagTypePending { + return false + } + + shouldUnfocus := false + for _, subjectOrContainerNode := range node.subjectAndContainerNodes { + if subjectOrContainerNode.containerNode != nil { + shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus + } else { + shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus + } + } + + if shouldUnfocus { + if node.flag == types.FlagTypeFocused { + node.flag = types.FlagTypeNone + } + return true + } + + return node.flag == types.FlagTypeFocused +} + +func (node *ContainerNode) Collate() []CollatedNodes { + return node.collate([]*ContainerNode{}) +} + +func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes { + collated := make([]CollatedNodes, 0) + + containers := make([]*ContainerNode, len(enclosingContainers)) + copy(containers, enclosingContainers) + containers = append(containers, node) + + for _, subjectOrContainer := range node.subjectAndContainerNodes { + if subjectOrContainer.containerNode != nil { + collated = append(collated, subjectOrContainer.containerNode.collate(containers)...) + } else { + collated = append(collated, CollatedNodes{ + Containers: containers, + Subject: subjectOrContainer.subjectNode, + }) + } + } + + return collated +} + +func (node *ContainerNode) PushContainerNode(container *ContainerNode) { + node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container}) +} + +func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) { + node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject}) +} + +func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) { + node.setupNodes = append(node.setupNodes, setupNode) +} + +func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode { + nodes := []leafnodes.BasicNode{} + for _, setupNode := range node.setupNodes { + if setupNode.Type() == nodeType { + nodes = append(nodes, setupNode) + } + } + return nodes +} + +func (node *ContainerNode) Text() string { + return node.text +} + +func (node *ContainerNode) CodeLocation() types.CodeLocation { + return node.codeLocation +} + +func (node *ContainerNode) Flag() types.FlagType { + return node.flag +} + +//sort.Interface + +func (node *ContainerNode) Len() int { + return len(node.subjectAndContainerNodes) +} + +func (node *ContainerNode) Less(i, j int) bool { + return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text() +} + +func (node *ContainerNode) Swap(i, j int) { + node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i] +} diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go new file mode 100644 index 000000000..678ea2514 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go @@ -0,0 +1,92 @@ +package failer + +import ( + "fmt" + "sync" + 
+ "github.com/onsi/ginkgo/types" +) + +type Failer struct { + lock *sync.Mutex + failure types.SpecFailure + state types.SpecState +} + +func New() *Failer { + return &Failer{ + lock: &sync.Mutex{}, + state: types.SpecStatePassed, + } +} + +func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStatePanicked + f.failure = types.SpecFailure{ + Message: "Test Panicked", + Location: location, + ForwardedPanic: fmt.Sprintf("%v", forwardedPanic), + } + } +} + +func (f *Failer) Timeout(location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateTimedOut + f.failure = types.SpecFailure{ + Message: "Timed out", + Location: location, + } + } +} + +func (f *Failer) Fail(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateFailed + f.failure = types.SpecFailure{ + Message: message, + Location: location, + } + } +} + +func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) { + f.lock.Lock() + defer f.lock.Unlock() + + failure := f.failure + outcome := f.state + if outcome != types.SpecStatePassed { + failure.ComponentType = componentType + failure.ComponentIndex = componentIndex + failure.ComponentCodeLocation = componentCodeLocation + } + + f.state = types.SpecStatePassed + f.failure = types.SpecFailure{} + + return failure, outcome +} + +func (f *Failer) Skip(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateSkipped + f.failure = types.SpecFailure{ + Message: message, + Location: location, + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go new file mode 100644 index 000000000..d6d54234c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go @@ -0,0 +1,103 @@ +package leafnodes + +import ( + "math" + "time" + + "sync" + + "github.com/onsi/ginkgo/types" +) + +type benchmarker struct { + mu sync.Mutex + measurements map[string]*types.SpecMeasurement + orderCounter int +} + +func newBenchmarker() *benchmarker { + return &benchmarker{ + measurements: make(map[string]*types.SpecMeasurement, 0), + } +} + +func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) { + t := time.Now() + body() + elapsedTime = time.Since(t) + + b.mu.Lock() + defer b.mu.Unlock() + measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...) + measurement.Results = append(measurement.Results, elapsedTime.Seconds()) + + return +} + +func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) { + b.mu.Lock() + measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...) + defer b.mu.Unlock() + measurement.Results = append(measurement.Results, value) +} + +func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) { + b.mu.Lock() + measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...) 
+ defer b.mu.Unlock() + measurement.Results = append(measurement.Results, value) +} + +func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement { + measurement, ok := b.measurements[name] + if !ok { + var computedInfo interface{} + computedInfo = nil + if len(info) > 0 { + computedInfo = info[0] + } + measurement = &types.SpecMeasurement{ + Name: name, + Info: computedInfo, + Order: b.orderCounter, + SmallestLabel: smallestLabel, + LargestLabel: largestLabel, + AverageLabel: averageLabel, + Units: units, + Precision: precision, + Results: make([]float64, 0), + } + b.measurements[name] = measurement + b.orderCounter++ + } + + return measurement +} + +func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement { + b.mu.Lock() + defer b.mu.Unlock() + for _, measurement := range b.measurements { + measurement.Smallest = math.MaxFloat64 + measurement.Largest = -math.MaxFloat64 + sum := float64(0) + sumOfSquares := float64(0) + + for _, result := range measurement.Results { + if result > measurement.Largest { + measurement.Largest = result + } + if result < measurement.Smallest { + measurement.Smallest = result + } + sum += result + sumOfSquares += result * result + } + + n := float64(len(measurement.Results)) + measurement.Average = sum / n + measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n)) + } + + return b.measurements +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go new file mode 100644 index 000000000..8c3902d60 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go @@ -0,0 +1,19 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/types" +) + +type BasicNode interface { + Type() types.SpecComponentType + Run() (types.SpecState, types.SpecFailure) + CodeLocation() types.CodeLocation +} + +type SubjectNode interface { + BasicNode + + Text() string + Flag() types.FlagType + Samples() int +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go new file mode 100644 index 000000000..6eded7b76 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go @@ -0,0 +1,47 @@ +package leafnodes + +import ( + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type ItNode struct { + runner *runner + + flag types.FlagType + text string +} + +func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode { + return &ItNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex), + flag: flag, + text: text, + } +} + +func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *ItNode) Type() types.SpecComponentType { + return types.SpecComponentTypeIt +} + +func (node *ItNode) Text() string { + return node.text +} + +func (node *ItNode) Flag() types.FlagType { + return node.flag +} + +func (node *ItNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func (node *ItNode) Samples() int { + return 1 +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go 
b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go new file mode 100644 index 000000000..3ab9a6d55 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go @@ -0,0 +1,62 @@ +package leafnodes + +import ( + "reflect" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type MeasureNode struct { + runner *runner + + text string + flag types.FlagType + samples int + benchmarker *benchmarker +} + +func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode { + benchmarker := newBenchmarker() + + wrappedBody := func() { + reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)}) + } + + return &MeasureNode{ + runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex), + + text: text, + flag: flag, + samples: samples, + benchmarker: benchmarker, + } +} + +func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement { + return node.benchmarker.measurementsReport() +} + +func (node *MeasureNode) Type() types.SpecComponentType { + return types.SpecComponentTypeMeasure +} + +func (node *MeasureNode) Text() string { + return node.text +} + +func (node *MeasureNode) Flag() types.FlagType { + return node.flag +} + +func (node *MeasureNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func (node *MeasureNode) Samples() int { + return node.samples +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go new file mode 100644 index 000000000..16cb66c3e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go @@ -0,0 +1,117 @@ +package leafnodes + +import ( + "fmt" + "reflect" + "time" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type runner struct { + isAsync bool + asyncFunc func(chan<- interface{}) + syncFunc func() + codeLocation types.CodeLocation + timeoutThreshold time.Duration + nodeType types.SpecComponentType + componentIndex int + failer *failer.Failer +} + +func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner { + bodyType := reflect.TypeOf(body) + if bodyType.Kind() != reflect.Func { + panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation)) + } + + runner := &runner{ + codeLocation: codeLocation, + timeoutThreshold: timeout, + failer: failer, + nodeType: nodeType, + componentIndex: componentIndex, + } + + switch bodyType.NumIn() { + case 0: + runner.syncFunc = body.(func()) + return runner + case 1: + if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) { + panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation)) + } + + wrappedBody := func(done chan<- interface{}) { + bodyValue := reflect.ValueOf(body) + bodyValue.Call([]reflect.Value{reflect.ValueOf(done)}) + } + + runner.isAsync = true + runner.asyncFunc = wrappedBody + return runner + } + + panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation)) +} + +func (r *runner) run() (outcome types.SpecState, failure 
types.SpecFailure) { + if r.isAsync { + return r.runAsync() + } else { + return r.runSync() + } +} + +func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) { + done := make(chan interface{}, 1) + + go func() { + finished := false + + defer func() { + if e := recover(); e != nil || !finished { + r.failer.Panic(codelocation.New(2), e) + select { + case <-done: + break + default: + close(done) + } + } + }() + + r.asyncFunc(done) + finished = true + }() + + // If this goroutine gets no CPU time before the select block, + // the <-done case may complete even if the test took longer than the timeoutThreshold. + // This can cause flaky behaviour, but we haven't seen it in the wild. + select { + case <-done: + case <-time.After(r.timeoutThreshold): + r.failer.Timeout(r.codeLocation) + } + + failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) + return +} +func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) { + finished := false + + defer func() { + if e := recover(); e != nil || !finished { + r.failer.Panic(codelocation.New(2), e) + } + + failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) + }() + + r.syncFunc() + finished = true + + return +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go new file mode 100644 index 000000000..e3e9cb7c5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go @@ -0,0 +1,48 @@ +package leafnodes + +import ( + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type SetupNode struct { + runner *runner +} + +func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *SetupNode) Type() types.SpecComponentType { + return node.runner.nodeType +} + +func (node *SetupNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex), + } +} + +func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex), + } +} + +func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex), + } +} + +func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex), + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go new file mode 100644 index 000000000..80f16ed78 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go @@ -0,0 +1,55 @@ +package leafnodes + +import ( + "time" + + 
"github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type SuiteNode interface { + Run(parallelNode int, parallelTotal int, syncHost string) bool + Passed() bool + Summary() *types.SetupSummary +} + +type simpleSuiteNode struct { + runner *runner + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + t := time.Now() + node.outcome, node.failure = node.runner.run() + node.runTime = time.Since(t) + + return node.outcome == types.SpecStatePassed +} + +func (node *simpleSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *simpleSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runner.nodeType, + CodeLocation: node.runner.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &simpleSuiteNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0), + } +} + +func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &simpleSuiteNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go new file mode 100644 index 000000000..a721d0cf7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go @@ -0,0 +1,90 @@ +package leafnodes + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type synchronizedAfterSuiteNode struct { + runnerA *runner + runnerB *runner + + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &synchronizedAfterSuiteNode{ + runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + } +} + +func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + node.outcome, node.failure = node.runnerA.run() + + if parallelNode == 1 { + if parallelTotal > 1 { + node.waitUntilOtherNodesAreDone(syncHost) + } + + outcome, failure := node.runnerB.run() + + if node.outcome == types.SpecStatePassed { + node.outcome, node.failure = outcome, failure + } + } + + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedAfterSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runnerA.nodeType, + CodeLocation: node.runnerA.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) { + for { + if node.canRun(syncHost) { + return + } + + 
time.Sleep(50 * time.Millisecond) + } +} + +func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool { + resp, err := http.Get(syncHost + "/RemoteAfterSuiteData") + if err != nil || resp.StatusCode != http.StatusOK { + return false + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false + } + resp.Body.Close() + + afterSuiteData := types.RemoteAfterSuiteData{} + err = json.Unmarshal(body, &afterSuiteData) + if err != nil { + return false + } + + return afterSuiteData.CanRun +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go new file mode 100644 index 000000000..d5c889319 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go @@ -0,0 +1,181 @@ +package leafnodes + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "reflect" + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type synchronizedBeforeSuiteNode struct { + runnerA *runner + runnerB *runner + + data []byte + + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + node := &synchronizedBeforeSuiteNode{} + + node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) + node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) + + return node +} + +func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + t := time.Now() + defer func() { + node.runTime = time.Since(t) + }() + + if parallelNode == 1 { + node.outcome, node.failure = node.runA(parallelTotal, syncHost) + } else { + node.outcome, node.failure = node.waitForA(syncHost) + } + + if node.outcome != types.SpecStatePassed { + return false + } + node.outcome, node.failure = node.runnerB.run() + + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) { + outcome, failure := node.runnerA.run() + + if parallelTotal > 1 { + state := types.RemoteBeforeSuiteStatePassed + if outcome != types.SpecStatePassed { + state = types.RemoteBeforeSuiteStateFailed + } + json := (types.RemoteBeforeSuiteData{ + Data: node.data, + State: state, + }).ToJSON() + http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json)) + } + + return outcome, failure +} + +func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) { + failure := func(message string) types.SpecFailure { + return types.SpecFailure{ + Message: message, + Location: node.runnerA.codeLocation, + ComponentType: node.runnerA.nodeType, + ComponentIndex: node.runnerA.componentIndex, + ComponentCodeLocation: node.runnerA.codeLocation, + } + } + for { + resp, err := http.Get(syncHost + "/BeforeSuiteState") + if err != nil || resp.StatusCode != http.StatusOK { + return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state") + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return types.SpecStateFailed, failure("Failed to read BeforeSuite state") + } + resp.Body.Close() + + beforeSuiteData := 
types.RemoteBeforeSuiteData{} + err = json.Unmarshal(body, &beforeSuiteData) + if err != nil { + return types.SpecStateFailed, failure("Failed to decode BeforeSuite state") + } + + switch beforeSuiteData.State { + case types.RemoteBeforeSuiteStatePassed: + node.data = beforeSuiteData.Data + return types.SpecStatePassed, types.SpecFailure{} + case types.RemoteBeforeSuiteStateFailed: + return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed") + case types.RemoteBeforeSuiteStateDisappeared: + return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite") + } + + time.Sleep(50 * time.Millisecond) + } +} + +func (node *synchronizedBeforeSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runnerA.nodeType, + CodeLocation: node.runnerA.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} { + typeA := reflect.TypeOf(bodyA) + if typeA.Kind() != reflect.Func { + panic("SynchronizedBeforeSuite expects a function as its first argument") + } + + takesNothing := typeA.NumIn() == 0 + takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface + returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8 + + if !((takesNothing || takesADoneChannel) && returnsBytes) { + panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.") + } + + if takesADoneChannel { + return func(done chan<- interface{}) { + out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)}) + node.data = out[0].Interface().([]byte) + } + } + + return func() { + out := reflect.ValueOf(bodyA).Call([]reflect.Value{}) + node.data = out[0].Interface().([]byte) + } +} + +func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} { + typeB := reflect.TypeOf(bodyB) + if typeB.Kind() != reflect.Func { + panic("SynchronizedBeforeSuite expects a function as its second argument") + } + + returnsNothing := typeB.NumOut() == 0 + takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 + takesBytesAndDone := typeB.NumIn() == 2 && + typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 && + typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface + + if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) { + panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)") + } + + if takesBytesAndDone { + return func(done chan<- interface{}) { + reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)}) + } + } + + return func() { + reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)}) + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go new file mode 100644 index 000000000..6b54afe01 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go @@ -0,0 +1,249 @@ +/* + +Aggregator is a reporter used by the Ginkgo CLI to aggregate and present 
parallel test output +coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel: + + ginkgo -nodes=N + +where N is the number of nodes you desire. +*/ +package remote + +import ( + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +type configAndSuite struct { + config config.GinkgoConfigType + summary *types.SuiteSummary +} + +type Aggregator struct { + nodeCount int + config config.DefaultReporterConfigType + stenographer stenographer.Stenographer + result chan bool + + suiteBeginnings chan configAndSuite + aggregatedSuiteBeginnings []configAndSuite + + beforeSuites chan *types.SetupSummary + aggregatedBeforeSuites []*types.SetupSummary + + afterSuites chan *types.SetupSummary + aggregatedAfterSuites []*types.SetupSummary + + specCompletions chan *types.SpecSummary + completedSpecs []*types.SpecSummary + + suiteEndings chan *types.SuiteSummary + aggregatedSuiteEndings []*types.SuiteSummary + specs []*types.SpecSummary + + startTime time.Time +} + +func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator { + aggregator := &Aggregator{ + nodeCount: nodeCount, + result: result, + config: config, + stenographer: stenographer, + + suiteBeginnings: make(chan configAndSuite, 0), + beforeSuites: make(chan *types.SetupSummary, 0), + afterSuites: make(chan *types.SetupSummary, 0), + specCompletions: make(chan *types.SpecSummary, 0), + suiteEndings: make(chan *types.SuiteSummary, 0), + } + + go aggregator.mux() + + return aggregator +} + +func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + aggregator.suiteBeginnings <- configAndSuite{config, summary} +} + +func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.beforeSuites <- setupSummary +} + +func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.afterSuites <- setupSummary +} + +func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) { + //noop +} + +func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) { + aggregator.specCompletions <- specSummary +} + +func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) { + aggregator.suiteEndings <- summary +} + +func (aggregator *Aggregator) mux() { +loop: + for { + select { + case configAndSuite := <-aggregator.suiteBeginnings: + aggregator.registerSuiteBeginning(configAndSuite) + case setupSummary := <-aggregator.beforeSuites: + aggregator.registerBeforeSuite(setupSummary) + case setupSummary := <-aggregator.afterSuites: + aggregator.registerAfterSuite(setupSummary) + case specSummary := <-aggregator.specCompletions: + aggregator.registerSpecCompletion(specSummary) + case suite := <-aggregator.suiteEndings: + finished, passed := aggregator.registerSuiteEnding(suite) + if finished { + aggregator.result <- passed + break loop + } + } + } +} + +func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) { + aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite) + + if len(aggregator.aggregatedSuiteBeginnings) == 1 { + aggregator.startTime = time.Now() + } + + if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { + return + } + + aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, 
configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct) + + totalNumberOfSpecs := 0 + if len(aggregator.aggregatedSuiteBeginnings) > 0 { + totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization + } + + aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct) + aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) { + aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) { + aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) { + aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary) + aggregator.specs = append(aggregator.specs, specSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) flushCompletedSpecs() { + if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { + return + } + + for _, setupSummary := range aggregator.aggregatedBeforeSuites { + aggregator.announceBeforeSuite(setupSummary) + } + + for _, specSummary := range aggregator.completedSpecs { + aggregator.announceSpec(specSummary) + } + + for _, setupSummary := range aggregator.aggregatedAfterSuites { + aggregator.announceAfterSuite(setupSummary) + } + + aggregator.aggregatedBeforeSuites = []*types.SetupSummary{} + aggregator.completedSpecs = []*types.SpecSummary{} + aggregator.aggregatedAfterSuites = []*types.SetupSummary{} +} + +func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) { + aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) + if setupSummary.State != types.SpecStatePassed { + aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) { + aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) + if setupSummary.State != types.SpecStatePassed { + aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) { + if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { + aggregator.stenographer.AnnounceSpecWillRun(specSummary) + } + + aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) + + switch specSummary.State { + case types.SpecStatePassed: + if specSummary.IsMeasurement { + aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct) + } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold { + aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct) + } else { + aggregator.stenographer.AnnounceSuccesfulSpec(specSummary) + } + + case types.SpecStatePending: + aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct) + case 
types.SpecStateSkipped: + aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace) + case types.SpecStateTimedOut: + aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + case types.SpecStatePanicked: + aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + case types.SpecStateFailed: + aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) { + aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite) + if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount { + return false, false + } + + aggregatedSuiteSummary := &types.SuiteSummary{} + aggregatedSuiteSummary.SuiteSucceeded = true + + for _, suiteSummary := range aggregator.aggregatedSuiteEndings { + if suiteSummary.SuiteSucceeded == false { + aggregatedSuiteSummary.SuiteSucceeded = false + } + + aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun + aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs + aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs + aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs + aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs + aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs + aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs + } + + aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime) + + aggregator.stenographer.SummarizeFailures(aggregator.specs) + aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct) + + return true, aggregatedSuiteSummary.SuiteSucceeded +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go new file mode 100644 index 000000000..284bc62e5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go @@ -0,0 +1,147 @@ +package remote + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/reporters/stenographer" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +//An interface to net/http's client to allow the injection of fakes under test +type Poster interface { + Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) +} + +/* +The ForwardingReporter is a Ginkgo reporter that forwards information to +a Ginkgo remote server. + +When streaming parallel test output, this reporter is automatically installed by Ginkgo. + +This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`; the Ginkgo test runner +detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter +in place of Ginkgo's DefaultReporter.
+*/ + +type ForwardingReporter struct { + serverHost string + poster Poster + outputInterceptor OutputInterceptor + debugMode bool + debugFile *os.File + nestedReporter *reporters.DefaultReporter +} + +func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter { + reporter := &ForwardingReporter{ + serverHost: serverHost, + poster: poster, + outputInterceptor: outputInterceptor, + } + + if debugFile != "" { + var err error + reporter.debugMode = true + reporter.debugFile, err = os.Create(debugFile) + if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + + if !config.Verbose { + //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication. + ginkgoWriter.AndRedirectTo(reporter.debugFile) + } + outputInterceptor.StreamTo(reporter.debugFile) //This is not working + + stenographer := stenographer.New(false, true, reporter.debugFile) + config.Succinct = false + config.Verbose = true + config.FullTrace = true + reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer) + } + + return reporter +} + +func (reporter *ForwardingReporter) post(path string, data interface{}) { + encoded, _ := json.Marshal(data) + buffer := bytes.NewBuffer(encoded) + reporter.poster.Post(reporter.serverHost+path, "application/json", buffer) +} + +func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) { + data := struct { + Config config.GinkgoConfigType `json:"config"` + Summary *types.SuiteSummary `json:"suite-summary"` + }{ + conf, + summary, + } + + reporter.outputInterceptor.StartInterceptingOutput() + if reporter.debugMode { + reporter.nestedReporter.SpecSuiteWillBegin(conf, summary) + reporter.debugFile.Sync() + } + reporter.post("/SpecSuiteWillBegin", data) +} + +func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.BeforeSuiteDidRun(setupSummary) + reporter.debugFile.Sync() + } + reporter.post("/BeforeSuiteDidRun", setupSummary) +} + +func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) { + if reporter.debugMode { + reporter.nestedReporter.SpecWillRun(specSummary) + reporter.debugFile.Sync() + } + reporter.post("/SpecWillRun", specSummary) +} + +func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + specSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.SpecDidComplete(specSummary) + reporter.debugFile.Sync() + } + reporter.post("/SpecDidComplete", specSummary) +} + +func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.AfterSuiteDidRun(setupSummary) + reporter.debugFile.Sync() + } + reporter.post("/AfterSuiteDidRun", setupSummary) +} + +func (reporter *ForwardingReporter) 
SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.outputInterceptor.StopInterceptingAndReturnOutput() + if reporter.debugMode { + reporter.nestedReporter.SpecSuiteDidEnd(summary) + reporter.debugFile.Sync() + } + reporter.post("/SpecSuiteDidEnd", summary) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go new file mode 100644 index 000000000..5154abe87 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go @@ -0,0 +1,13 @@ +package remote + +import "os" + +/* +The OutputInterceptor is used by the ForwardingReporter to +intercept and capture all stdout and stderr output during a test run. +*/ +type OutputInterceptor interface { + StartInterceptingOutput() error + StopInterceptingAndReturnOutput() (string, error) + StreamTo(*os.File) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go new file mode 100644 index 000000000..ab6622a29 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go @@ -0,0 +1,83 @@ +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package remote + +import ( + "errors" + "io/ioutil" + "os" + + "github.com/hpcloud/tail" +) + +func NewOutputInterceptor() OutputInterceptor { + return &outputInterceptor{} +} + +type outputInterceptor struct { + redirectFile *os.File + streamTarget *os.File + intercepting bool + tailer *tail.Tail + doneTailing chan bool +} + +func (interceptor *outputInterceptor) StartInterceptingOutput() error { + if interceptor.intercepting { + return errors.New("Already intercepting output!") + } + interceptor.intercepting = true + + var err error + + interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output") + if err != nil { + return err + } + + // Call a function in ./syscall_dup_*.go + // If building for everything other than linux_arm64, + // use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2) + // call syscall.Dup3(oldfd, newfd, 0).
They are nearly identical, see: http://linux.die.net/man/2/dup3 + syscallDup(int(interceptor.redirectFile.Fd()), 1) + syscallDup(int(interceptor.redirectFile.Fd()), 2) + + if interceptor.streamTarget != nil { + interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true}) + interceptor.doneTailing = make(chan bool) + + go func() { + for line := range interceptor.tailer.Lines { + interceptor.streamTarget.Write([]byte(line.Text + "\n")) + } + close(interceptor.doneTailing) + }() + } + + return nil +} + +func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { + if !interceptor.intercepting { + return "", errors.New("Not intercepting output!") + } + + interceptor.redirectFile.Close() + output, err := ioutil.ReadFile(interceptor.redirectFile.Name()) + os.Remove(interceptor.redirectFile.Name()) + + interceptor.intercepting = false + + if interceptor.streamTarget != nil { + interceptor.tailer.Stop() + interceptor.tailer.Cleanup() + <-interceptor.doneTailing + interceptor.streamTarget.Sync() + } + + return string(output), err +} + +func (interceptor *outputInterceptor) StreamTo(out *os.File) { + interceptor.streamTarget = out +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go new file mode 100644 index 000000000..40c790336 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go @@ -0,0 +1,36 @@ +// +build windows + +package remote + +import ( + "errors" + "os" +) + +func NewOutputInterceptor() OutputInterceptor { + return &outputInterceptor{} +} + +type outputInterceptor struct { + intercepting bool +} + +func (interceptor *outputInterceptor) StartInterceptingOutput() error { + if interceptor.intercepting { + return errors.New("Already intercepting output!") + } + interceptor.intercepting = true + + // not working on windows... + + return nil +} + +func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { + // not working on windows... + interceptor.intercepting = false + + return "", nil +} + +func (interceptor *outputInterceptor) StreamTo(*os.File) {} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go new file mode 100644 index 000000000..367c54daf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go @@ -0,0 +1,224 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser). + +*/ + +package remote + +import ( + "encoding/json" + "io/ioutil" + "net" + "net/http" + "sync" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" +) + +/* +Server spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters.
+*/ +type Server struct { + listener net.Listener + reporters []reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteData types.RemoteBeforeSuiteData + parallelTotal int + counter int +} + +//Create a new server, automatically selecting a port +func NewServer(parallelTotal int) (*Server, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &Server{ + listener: listener, + lock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}, + parallelTotal: parallelTotal, + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *Server) Start() { + httpServer := &http.Server{} + mux := http.NewServeMux() + httpServer.Handler = mux + + //streaming endpoints + mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin) + mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun) + mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun) + mux.HandleFunc("/SpecWillRun", server.specWillRun) + mux.HandleFunc("/SpecDidComplete", server.specDidComplete) + mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd) + + //synchronization endpoints + mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState) + mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData) + mux.HandleFunc("/counter", server.handleCounter) + mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *Server) Close() { + server.listener.Close() +} + +//The address the server can be reached at. Pass this into the `ForwardingReporter`.
+func (server *Server) Address() string { + return "http://" + server.listener.Addr().String() +} + +// +// Streaming Endpoints +// + +//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *Server) readAll(request *http.Request) []byte { + defer request.Body.Close() + body, _ := ioutil.ReadAll(request.Body) + return body +} + +func (server *Server) RegisterReporters(reporters ...reporters.Reporter) { + server.reporters = reporters +} + +func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + + var data struct { + Config config.GinkgoConfigType `json:"config"` + Summary *types.SuiteSummary `json:"suite-summary"` + } + + json.Unmarshal(body, &data) + + for _, reporter := range server.reporters { + reporter.SpecSuiteWillBegin(data.Config, data.Summary) + } +} + +func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var setupSummary *types.SetupSummary + json.Unmarshal(body, &setupSummary) + + for _, reporter := range server.reporters { + reporter.BeforeSuiteDidRun(setupSummary) + } +} + +func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var setupSummary *types.SetupSummary + json.Unmarshal(body, &setupSummary) + + for _, reporter := range server.reporters { + reporter.AfterSuiteDidRun(setupSummary) + } +} + +func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var specSummary *types.SpecSummary + json.Unmarshal(body, &specSummary) + + for _, reporter := range server.reporters { + reporter.SpecWillRun(specSummary) + } +} + +func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var specSummary *types.SpecSummary + json.Unmarshal(body, &specSummary) + + for _, reporter := range server.reporters { + reporter.SpecDidComplete(specSummary) + } +} + +func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var suiteSummary *types.SuiteSummary + json.Unmarshal(body, &suiteSummary) + + for _, reporter := range server.reporters { + reporter.SpecSuiteDidEnd(suiteSummary) + } +} + +// +// Synchronization Endpoints +// + +func (server *Server) RegisterAlive(node int, alive func() bool) { + server.lock.Lock() + defer server.lock.Unlock() + server.alives[node-1] = alive +} + +func (server *Server) nodeIsAlive(node int) bool { + server.lock.Lock() + defer server.lock.Unlock() + alive := server.alives[node-1] + if alive == nil { + return true + } + return alive() +} + +func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + if request.Method == "POST" { + dec := json.NewDecoder(request.Body) + dec.Decode(&(server.beforeSuiteData)) + } else { + beforeSuiteData := server.beforeSuiteData + if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) { + beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared + } + enc := json.NewEncoder(writer) + enc.Encode(beforeSuiteData) + } +} + +func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) { + afterSuiteData := types.RemoteAfterSuiteData{ + CanRun: true, + } + for i := 2; i <= server.parallelTotal; i++ { + afterSuiteData.CanRun = 
afterSuiteData.CanRun && !server.nodeIsAlive(i) + } + + enc := json.NewEncoder(writer) + enc.Encode(afterSuiteData) +} + +func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) { + c := spec_iterator.Counter{} + server.lock.Lock() + c.Index = server.counter + server.counter = server.counter + 1 + server.lock.Unlock() + + json.NewEncoder(writer).Encode(c) +} + +func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) { + writer.Write([]byte("")) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go new file mode 100644 index 000000000..9550d37b3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go @@ -0,0 +1,11 @@ +// +build linux,arm64 + +package remote + +import "syscall" + +// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so +// use the nearly identical syscall.Dup3 instead +func syscallDup(oldfd int, newfd int) (err error) { + return syscall.Dup3(oldfd, newfd, 0) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go new file mode 100644 index 000000000..75ef7fb78 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go @@ -0,0 +1,9 @@ +// +build solaris + +package remote + +import "golang.org/x/sys/unix" + +func syscallDup(oldfd int, newfd int) (err error) { + return unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go new file mode 100644 index 000000000..ef6255960 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go @@ -0,0 +1,11 @@ +// +build !linux !arm64 +// +build !windows +// +build !solaris + +package remote + +import "syscall" + +func syscallDup(oldfd int, newfd int) (err error) { + return syscall.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go new file mode 100644 index 000000000..7fd68ee8e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go @@ -0,0 +1,247 @@ +package spec + +import ( + "fmt" + "io" + "time" + + "sync" + + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +type Spec struct { + subject leafnodes.SubjectNode + focused bool + announceProgress bool + + containers []*containernode.ContainerNode + + state types.SpecState + runTime time.Duration + startTime time.Time + failure types.SpecFailure + previousFailures bool + + stateMutex *sync.Mutex +} + +func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec { + spec := &Spec{ + subject: subject, + containers: containers, + focused: subject.Flag() == types.FlagTypeFocused, + announceProgress: announceProgress, + stateMutex: &sync.Mutex{}, + } + + spec.processFlag(subject.Flag()) + for i := len(containers) - 1; i >= 0; i-- { + spec.processFlag(containers[i].Flag()) + } + + return spec +} + +func (spec *Spec) processFlag(flag types.FlagType) { + if flag == types.FlagTypeFocused { + spec.focused = true + } else if flag == types.FlagTypePending { + spec.setState(types.SpecStatePending) + } +} + +func (spec *Spec) Skip() { + spec.setState(types.SpecStateSkipped) +} + +func 
(spec *Spec) Failed() bool { + return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut +} + +func (spec *Spec) Passed() bool { + return spec.getState() == types.SpecStatePassed +} + +func (spec *Spec) Flaked() bool { + return spec.getState() == types.SpecStatePassed && spec.previousFailures +} + +func (spec *Spec) Pending() bool { + return spec.getState() == types.SpecStatePending +} + +func (spec *Spec) Skipped() bool { + return spec.getState() == types.SpecStateSkipped +} + +func (spec *Spec) Focused() bool { + return spec.focused +} + +func (spec *Spec) IsMeasurement() bool { + return spec.subject.Type() == types.SpecComponentTypeMeasure +} + +func (spec *Spec) Summary(suiteID string) *types.SpecSummary { + componentTexts := make([]string, len(spec.containers)+1) + componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1) + + for i, container := range spec.containers { + componentTexts[i] = container.Text() + componentCodeLocations[i] = container.CodeLocation() + } + + componentTexts[len(spec.containers)] = spec.subject.Text() + componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation() + + runTime := spec.runTime + if runTime == 0 && !spec.startTime.IsZero() { + runTime = time.Since(spec.startTime) + } + + return &types.SpecSummary{ + IsMeasurement: spec.IsMeasurement(), + NumberOfSamples: spec.subject.Samples(), + ComponentTexts: componentTexts, + ComponentCodeLocations: componentCodeLocations, + State: spec.getState(), + RunTime: runTime, + Failure: spec.failure, + Measurements: spec.measurementsReport(), + SuiteID: suiteID, + } +} + +func (spec *Spec) ConcatenatedString() string { + s := "" + for _, container := range spec.containers { + s += container.Text() + " " + } + + return s + spec.subject.Text() +} + +func (spec *Spec) Run(writer io.Writer) { + if spec.getState() == types.SpecStateFailed { + spec.previousFailures = true + } + + spec.startTime = time.Now() + defer func() { + spec.runTime = time.Since(spec.startTime) + }() + + for sample := 0; sample < spec.subject.Samples(); sample++ { + spec.runSample(sample, writer) + + if spec.getState() != types.SpecStatePassed { + return + } + } +} + +func (spec *Spec) getState() types.SpecState { + spec.stateMutex.Lock() + defer spec.stateMutex.Unlock() + return spec.state +} + +func (spec *Spec) setState(state types.SpecState) { + spec.stateMutex.Lock() + defer spec.stateMutex.Unlock() + spec.state = state +} + +func (spec *Spec) runSample(sample int, writer io.Writer) { + spec.setState(types.SpecStatePassed) + spec.failure = types.SpecFailure{} + innerMostContainerIndexToUnwind := -1 + + defer func() { + for i := innerMostContainerIndexToUnwind; i >= 0; i-- { + container := spec.containers[i] + for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) { + spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach) + justAfterEachState, justAfterEachFailure := justAfterEach.Run() + if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed { + spec.state = justAfterEachState + spec.failure = justAfterEachFailure + } + } + } + + for i := innerMostContainerIndexToUnwind; i >= 0; i-- { + container := spec.containers[i] + for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) { + spec.announceSetupNode(writer, "AfterEach", container, afterEach) + afterEachState, afterEachFailure := afterEach.Run() + if 
afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed { + spec.setState(afterEachState) + spec.failure = afterEachFailure + } + } + } + }() + + for i, container := range spec.containers { + innerMostContainerIndexToUnwind = i + for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) { + spec.announceSetupNode(writer, "BeforeEach", container, beforeEach) + s, f := beforeEach.Run() + spec.failure = f + spec.setState(s) + if spec.getState() != types.SpecStatePassed { + return + } + } + } + + for _, container := range spec.containers { + for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) { + spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach) + s, f := justBeforeEach.Run() + spec.failure = f + spec.setState(s) + if spec.getState() != types.SpecStatePassed { + return + } + } + } + + spec.announceSubject(writer, spec.subject) + s, f := spec.subject.Run() + spec.failure = f + spec.setState(s) +} + +func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) { + if spec.announceProgress { + s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String()) + writer.Write([]byte(s)) + } +} + +func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) { + if spec.announceProgress { + nodeType := "" + switch subject.Type() { + case types.SpecComponentTypeIt: + nodeType = "It" + case types.SpecComponentTypeMeasure: + nodeType = "Measure" + } + s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String()) + writer.Write([]byte(s)) + } +} + +func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement { + if !spec.IsMeasurement() || spec.Failed() { + return map[string]*types.SpecMeasurement{} + } + + return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport() +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go new file mode 100644 index 000000000..27c0d1d6c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go @@ -0,0 +1,144 @@ +package spec + +import ( + "math/rand" + "regexp" + "sort" +) + +type Specs struct { + specs []*Spec + names []string + + hasProgrammaticFocus bool + RegexScansFilePath bool +} + +func NewSpecs(specs []*Spec) *Specs { + names := make([]string, len(specs)) + for i, spec := range specs { + names[i] = spec.ConcatenatedString() + } + return &Specs{ + specs: specs, + names: names, + } +} + +func (e *Specs) Specs() []*Spec { + return e.specs +} + +func (e *Specs) HasProgrammaticFocus() bool { + return e.hasProgrammaticFocus +} + +func (e *Specs) Shuffle(r *rand.Rand) { + sort.Sort(e) + permutation := r.Perm(len(e.specs)) + shuffledSpecs := make([]*Spec, len(e.specs)) + names := make([]string, len(e.specs)) + for i, j := range permutation { + shuffledSpecs[i] = e.specs[j] + names[i] = e.names[j] + } + e.specs = shuffledSpecs + e.names = names +} + +func (e *Specs) ApplyFocus(description string, focusString string, skipString string) { + if focusString == "" && skipString == "" { + e.applyProgrammaticFocus() + } else { + e.applyRegExpFocusAndSkip(description, focusString, skipString) + } +} + +func (e *Specs) applyProgrammaticFocus() { + e.hasProgrammaticFocus = false + for _, spec := range e.specs { + if spec.Focused() && !spec.Pending() { + e.hasProgrammaticFocus = true + break + } 
+ } + + if e.hasProgrammaticFocus { + for _, spec := range e.specs { + if !spec.Focused() { + spec.Skip() + } + } + } +} + +// toMatch returns a byte[] to be used by regex matchers. When adding new behaviours to the matching function, +// this is the place which we append to. +func (e *Specs) toMatch(description string, i int) []byte { + if i > len(e.names) { + return nil + } + if e.RegexScansFilePath { + return []byte( + description + " " + + e.names[i] + " " + + e.specs[i].subject.CodeLocation().FileName) + } else { + return []byte( + description + " " + + e.names[i]) + } +} + +func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) { + var focusFilter *regexp.Regexp + if focusString != "" { + focusFilter = regexp.MustCompile(focusString) + } + var skipFilter *regexp.Regexp + if skipString != "" { + skipFilter = regexp.MustCompile(skipString) + } + + for i, spec := range e.specs { + matchesFocus := true + matchesSkip := false + + toMatch := e.toMatch(description, i) + + if focusFilter != nil { + matchesFocus = focusFilter.Match([]byte(toMatch)) + } + + if skipFilter != nil { + matchesSkip = skipFilter.Match([]byte(toMatch)) + } + + if !matchesFocus || matchesSkip { + spec.Skip() + } + } +} + +func (e *Specs) SkipMeasurements() { + for _, spec := range e.specs { + if spec.IsMeasurement() { + spec.Skip() + } + } +} + +//sort.Interface + +func (e *Specs) Len() int { + return len(e.specs) +} + +func (e *Specs) Less(i, j int) bool { + return e.names[i] < e.names[j] +} + +func (e *Specs) Swap(i, j int) { + e.names[i], e.names[j] = e.names[j], e.names[i] + e.specs[i], e.specs[j] = e.specs[j], e.specs[i] +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go new file mode 100644 index 000000000..82272554a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go @@ -0,0 +1,55 @@ +package spec_iterator + +func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) { + if length == 0 { + return 0, 0 + } + + // We have more nodes than tests. Trivial case. 
+ if parallelTotal >= length { + if parallelNode > length { + return 0, 0 + } else { + return parallelNode - 1, 1 + } + } + + // This is the minimum amount of tests that a node will be required to run + minTestsPerNode := length / parallelTotal + + // This is the maximum amount of tests that a node will be required to run + // The algorithm guarantees that this would be equal to at least the minimum amount + // and at most one more + maxTestsPerNode := minTestsPerNode + if length%parallelTotal != 0 { + maxTestsPerNode++ + } + + // Number of nodes that will have to run the maximum amount of tests per node + numMaxLoadNodes := length % parallelTotal + + // Number of nodes that precede the current node and will have to run the maximum amount of tests per node + var numPrecedingMaxLoadNodes int + if parallelNode > numMaxLoadNodes { + numPrecedingMaxLoadNodes = numMaxLoadNodes + } else { + numPrecedingMaxLoadNodes = parallelNode - 1 + } + + // Number of nodes that precede the current node and will have to run the minimum amount of tests per node + var numPrecedingMinLoadNodes int + if parallelNode <= numMaxLoadNodes { + numPrecedingMinLoadNodes = 0 + } else { + numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1 + } + + // Evaluate the test start index and number of tests to run + startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode + if parallelNode > numMaxLoadNodes { + count = minTestsPerNode + } else { + count = maxTestsPerNode + } + return +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go new file mode 100644 index 000000000..99f548bca --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go @@ -0,0 +1,59 @@ +package spec_iterator + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/onsi/ginkgo/internal/spec" +) + +type ParallelIterator struct { + specs []*spec.Spec + host string + client *http.Client +} + +func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator { + return &ParallelIterator{ + specs: specs, + host: host, + client: &http.Client{}, + } +} + +func (s *ParallelIterator) Next() (*spec.Spec, error) { + resp, err := s.client.Get(s.host + "/counter") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode) + } + + var counter Counter + err = json.NewDecoder(resp.Body).Decode(&counter) + if err != nil { + return nil, err + } + + if counter.Index >= len(s.specs) { + return nil, ErrClosed + } + + return s.specs[counter.Index], nil +} + +func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return -1, false +} + +func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + return -1, false +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go new file mode 100644 index 000000000..a51c93b8b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go @@ -0,0 +1,45 @@ +package spec_iterator + +import ( + "github.com/onsi/ginkgo/internal/spec" +) + +type SerialIterator struct { + specs []*spec.Spec + index int +} + +func 
NewSerialIterator(specs []*spec.Spec) *SerialIterator { + return &SerialIterator{ + specs: specs, + index: 0, + } +} + +func (s *SerialIterator) Next() (*spec.Spec, error) { + if s.index >= len(s.specs) { + return nil, ErrClosed + } + + spec := s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *SerialIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return len(s.specs), true +} + +func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for _, s := range s.specs { + if !s.Skipped() && !s.Pending() { + count += 1 + } + } + return count, true +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go new file mode 100644 index 000000000..ad4a3ea3c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go @@ -0,0 +1,47 @@ +package spec_iterator + +import "github.com/onsi/ginkgo/internal/spec" + +type ShardedParallelIterator struct { + specs []*spec.Spec + index int + maxIndex int +} + +func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator { + startIndex, count := ParallelizedIndexRange(len(specs), total, node) + + return &ShardedParallelIterator{ + specs: specs, + index: startIndex, + maxIndex: startIndex + count, + } +} + +func (s *ShardedParallelIterator) Next() (*spec.Spec, error) { + if s.index >= s.maxIndex { + return nil, ErrClosed + } + + spec := s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return s.maxIndex - s.index, true +} + +func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for i := s.index; i < s.maxIndex; i += 1 { + if !s.specs[i].Skipped() && !s.specs[i].Pending() { + count += 1 + } + } + return count, true +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go new file mode 100644 index 000000000..74bffad64 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go @@ -0,0 +1,20 @@ +package spec_iterator + +import ( + "errors" + + "github.com/onsi/ginkgo/internal/spec" +) + +var ErrClosed = errors.New("no more specs to run") + +type SpecIterator interface { + Next() (*spec.Spec, error) + NumberOfSpecsPriorToIteration() int + NumberOfSpecsToProcessIfKnown() (int, bool) + NumberOfSpecsThatWillBeRunIfKnown() (int, bool) +} + +type Counter struct { + Index int `json:"index"` +} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go new file mode 100644 index 000000000..a0b8b62d5 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go @@ -0,0 +1,15 @@ +package specrunner + +import ( + "crypto/rand" + "fmt" +) + +func randomID() string { + b := make([]byte, 8) + _, err := rand.Read(b) + if err != nil { + return "" + } + return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8]) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go new 
file mode 100644 index 000000000..2c683cb8b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go @@ -0,0 +1,411 @@ +package specrunner + +import ( + "fmt" + "os" + "os/signal" + "sync" + "syscall" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + Writer "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + + "time" +) + +type SpecRunner struct { + description string + beforeSuiteNode leafnodes.SuiteNode + iterator spec_iterator.SpecIterator + afterSuiteNode leafnodes.SuiteNode + reporters []reporters.Reporter + startTime time.Time + suiteID string + runningSpec *spec.Spec + writer Writer.WriterInterface + config config.GinkgoConfigType + interrupted bool + processedSpecs []*spec.Spec + lock *sync.Mutex +} + +func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner { + return &SpecRunner{ + description: description, + beforeSuiteNode: beforeSuiteNode, + iterator: iterator, + afterSuiteNode: afterSuiteNode, + reporters: reporters, + writer: writer, + config: config, + suiteID: randomID(), + lock: &sync.Mutex{}, + } +} + +func (runner *SpecRunner) Run() bool { + if runner.config.DryRun { + runner.performDryRun() + return true + } + + runner.reportSuiteWillBegin() + signalRegistered := make(chan struct{}) + go runner.registerForInterrupts(signalRegistered) + <-signalRegistered + + suitePassed := runner.runBeforeSuite() + + if suitePassed { + suitePassed = runner.runSpecs() + } + + runner.blockForeverIfInterrupted() + + suitePassed = runner.runAfterSuite() && suitePassed + + runner.reportSuiteDidEnd(suitePassed) + + return suitePassed +} + +func (runner *SpecRunner) performDryRun() { + runner.reportSuiteWillBegin() + + if runner.beforeSuiteNode != nil { + summary := runner.beforeSuiteNode.Summary() + summary.State = types.SpecStatePassed + runner.reportBeforeSuite(summary) + } + + for { + spec, err := runner.iterator.Next() + if err == spec_iterator.ErrClosed { + break + } + if err != nil { + fmt.Println("failed to iterate over tests:\n" + err.Error()) + break + } + + runner.processedSpecs = append(runner.processedSpecs, spec) + + summary := spec.Summary(runner.suiteID) + runner.reportSpecWillRun(summary) + if summary.State == types.SpecStateInvalid { + summary.State = types.SpecStatePassed + } + runner.reportSpecDidComplete(summary, false) + } + + if runner.afterSuiteNode != nil { + summary := runner.afterSuiteNode.Summary() + summary.State = types.SpecStatePassed + runner.reportAfterSuite(summary) + } + + runner.reportSuiteDidEnd(true) +} + +func (runner *SpecRunner) runBeforeSuite() bool { + if runner.beforeSuiteNode == nil || runner.wasInterrupted() { + return true + } + + runner.writer.Truncate() + conf := runner.config + passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) + if !passed { + runner.writer.DumpOut() + } + runner.reportBeforeSuite(runner.beforeSuiteNode.Summary()) + return passed +} + +func (runner *SpecRunner) runAfterSuite() bool { + if runner.afterSuiteNode == nil { + return true + } + + runner.writer.Truncate() + conf := runner.config + passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) + if 
!passed { + runner.writer.DumpOut() + } + runner.reportAfterSuite(runner.afterSuiteNode.Summary()) + return passed +} + +func (runner *SpecRunner) runSpecs() bool { + suiteFailed := false + skipRemainingSpecs := false + for { + spec, err := runner.iterator.Next() + if err == spec_iterator.ErrClosed { + break + } + if err != nil { + fmt.Println("failed to iterate over tests:\n" + err.Error()) + suiteFailed = true + break + } + + runner.processedSpecs = append(runner.processedSpecs, spec) + + if runner.wasInterrupted() { + break + } + if skipRemainingSpecs { + spec.Skip() + } + + if !spec.Skipped() && !spec.Pending() { + if passed := runner.runSpec(spec); !passed { + suiteFailed = true + } + } else if spec.Pending() && runner.config.FailOnPending { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + suiteFailed = true + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + } else { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + } + + if spec.Failed() && runner.config.FailFast { + skipRemainingSpecs = true + } + } + + return !suiteFailed +} + +func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) { + maxAttempts := 1 + if runner.config.FlakeAttempts > 0 { + // uninitialized configs count as 1 + maxAttempts = runner.config.FlakeAttempts + } + + for i := 0; i < maxAttempts; i++ { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + runner.runningSpec = spec + spec.Run(runner.writer) + runner.runningSpec = nil + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + if !spec.Failed() { + return true + } + } + return false +} + +func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) { + if runner.runningSpec == nil { + return nil, false + } + + return runner.runningSpec.Summary(runner.suiteID), true +} + +func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + close(signalRegistered) + + <-c + signal.Stop(c) + runner.markInterrupted() + go runner.registerForHardInterrupts() + runner.writer.DumpOutWithHeader(` +Received interrupt. Emitting contents of GinkgoWriter... +--------------------------------------------------------- +`) + if runner.afterSuiteNode != nil { + fmt.Fprint(os.Stderr, ` +--------------------------------------------------------- +Received interrupt. Running AfterSuite... +^C again to terminate immediately +`) + runner.runAfterSuite() + } + runner.reportSuiteDidEnd(false) + os.Exit(1) +} + +func (runner *SpecRunner) registerForHardInterrupts() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + + <-c + fmt.Fprintln(os.Stderr, "\nReceived second interrupt. 
Shutting down.") + os.Exit(1) +} + +func (runner *SpecRunner) blockForeverIfInterrupted() { + runner.lock.Lock() + interrupted := runner.interrupted + runner.lock.Unlock() + + if interrupted { + select {} + } +} + +func (runner *SpecRunner) markInterrupted() { + runner.lock.Lock() + defer runner.lock.Unlock() + runner.interrupted = true +} + +func (runner *SpecRunner) wasInterrupted() bool { + runner.lock.Lock() + defer runner.lock.Unlock() + return runner.interrupted +} + +func (runner *SpecRunner) reportSuiteWillBegin() { + runner.startTime = time.Now() + summary := runner.suiteWillBeginSummary() + for _, reporter := range runner.reporters { + reporter.SpecSuiteWillBegin(runner.config, summary) + } +} + +func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) { + for _, reporter := range runner.reporters { + reporter.BeforeSuiteDidRun(summary) + } +} + +func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) { + for _, reporter := range runner.reporters { + reporter.AfterSuiteDidRun(summary) + } +} + +func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) { + runner.writer.Truncate() + + for _, reporter := range runner.reporters { + reporter.SpecWillRun(summary) + } +} + +func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) { + if failed && len(summary.CapturedOutput) == 0 { + summary.CapturedOutput = string(runner.writer.Bytes()) + } + for i := len(runner.reporters) - 1; i >= 1; i-- { + runner.reporters[i].SpecDidComplete(summary) + } + + if failed { + runner.writer.DumpOut() + } + + runner.reporters[0].SpecDidComplete(summary) +} + +func (runner *SpecRunner) reportSuiteDidEnd(success bool) { + summary := runner.suiteDidEndSummary(success) + summary.RunTime = time.Since(runner.startTime) + for _, reporter := range runner.reporters { + reporter.SpecSuiteDidEnd(summary) + } +} + +func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) { + count = 0 + + for _, spec := range runner.processedSpecs { + if filter(spec) { + count++ + } + } + + return count +} + +func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary { + numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return !ex.Skipped() && !ex.Pending() + }) + + numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Pending() + }) + + numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Skipped() + }) + + numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Passed() + }) + + numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Flaked() + }) + + numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Failed() + }) + + if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun { + var known bool + numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() + if !known { + numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration() + } + numberOfFailedSpecs = numberOfSpecsThatWillBeRun + } + + return &types.SuiteSummary{ + SuiteDescription: runner.description, + SuiteSucceeded: success, + SuiteID: runner.suiteID, + + NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), + NumberOfTotalSpecs: len(runner.processedSpecs), + 
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun, + NumberOfPendingSpecs: numberOfPendingSpecs, + NumberOfSkippedSpecs: numberOfSkippedSpecs, + NumberOfPassedSpecs: numberOfPassedSpecs, + NumberOfFailedSpecs: numberOfFailedSpecs, + NumberOfFlakedSpecs: numberOfFlakedSpecs, + } +} + +func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary { + numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown() + if !known { + numTotal = -1 + } + + numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() + if !known { + numToRun = -1 + } + + return &types.SuiteSummary{ + SuiteDescription: runner.description, + SuiteID: runner.suiteID, + + NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), + NumberOfTotalSpecs: numTotal, + NumberOfSpecsThatWillBeRun: numToRun, + NumberOfPendingSpecs: -1, + NumberOfSkippedSpecs: -1, + NumberOfPassedSpecs: -1, + NumberOfFailedSpecs: -1, + NumberOfFlakedSpecs: -1, + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go new file mode 100644 index 000000000..3104bbc88 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -0,0 +1,190 @@ +package suite + +import ( + "math/rand" + "net/http" + "time" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/internal/specrunner" + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" +) + +type ginkgoTestingT interface { + Fail() +} + +type Suite struct { + topLevelContainer *containernode.ContainerNode + currentContainer *containernode.ContainerNode + containerIndex int + beforeSuiteNode leafnodes.SuiteNode + afterSuiteNode leafnodes.SuiteNode + runner *specrunner.SpecRunner + failer *failer.Failer + running bool +} + +func New(failer *failer.Failer) *Suite { + topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{}) + + return &Suite{ + topLevelContainer: topLevelContainer, + currentContainer: topLevelContainer, + failer: failer, + containerIndex: 1, + } +} + +func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) { + if config.ParallelTotal < 1 { + panic("ginkgo.parallel.total must be >= 1") + } + + if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 { + panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total") + } + + r := rand.New(rand.NewSource(config.RandomSeed)) + suite.topLevelContainer.Shuffle(r) + iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config) + suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config) + + suite.running = true + success := suite.runner.Run() + if !success { + t.Fail() + } + return success, hasProgrammaticFocus +} + +func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) { + specsSlice := []*spec.Spec{} + suite.topLevelContainer.BackPropagateProgrammaticFocus() + for _, collatedNodes := range suite.topLevelContainer.Collate() { + specsSlice = append(specsSlice, 
spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress)) + } + + specs := spec.NewSpecs(specsSlice) + specs.RegexScansFilePath = config.RegexScansFilePath + + if config.RandomizeAllSpecs { + specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) + } + + specs.ApplyFocus(description, config.FocusString, config.SkipString) + + if config.SkipMeasurements { + specs.SkipMeasurements() + } + + var iterator spec_iterator.SpecIterator + + if config.ParallelTotal > 1 { + iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost) + resp, err := http.Get(config.SyncHost + "/has-counter") + if err != nil || resp.StatusCode != http.StatusOK { + iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode) + } + } else { + iterator = spec_iterator.NewSerialIterator(specs.Specs()) + } + + return iterator, specs.HasProgrammaticFocus() +} + +func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) { + return suite.runner.CurrentSpecSummary() +} + +func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.beforeSuiteNode != nil { + panic("You may only call BeforeSuite once!") + } + suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.afterSuiteNode != nil { + panic("You may only call AfterSuite once!") + } + suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.beforeSuiteNode != nil { + panic("You may only call BeforeSuite once!") + } + suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.afterSuiteNode != nil { + panic("You may only call AfterSuite once!") + } + suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) { + container := containernode.New(text, flag, codeLocation) + suite.currentContainer.PushContainerNode(container) + + previousContainer := suite.currentContainer + suite.currentContainer = container + suite.containerIndex++ + + body() + + suite.containerIndex-- + suite.currentContainer = previousContainer +} + +func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) { + if suite.running { + suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, 
body, flag, codeLocation, samples, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go new file mode 100644 index 000000000..090445d08 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go @@ -0,0 +1,76 @@ +package testingtproxy + +import ( + "fmt" + "io" +) + +type failFunc func(message string, callerSkip ...int) + +func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy { + return &ginkgoTestingTProxy{ + fail: fail, + offset: offset, + writer: writer, + } +} + +type ginkgoTestingTProxy struct { + fail failFunc + offset int + writer io.Writer +} + +func (t *ginkgoTestingTProxy) Error(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fail() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) FailNow() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Log(args ...interface{}) { + fmt.Fprintln(t.writer, args...) +} + +func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { + t.Log(fmt.Sprintf(format, args...)) +} + +func (t *ginkgoTestingTProxy) Failed() bool { + return false +} + +func (t *ginkgoTestingTProxy) Parallel() { +} + +func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { + fmt.Println(args...) 
+} + +func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { + t.Skip(fmt.Sprintf(format, args...)) +} + +func (t *ginkgoTestingTProxy) SkipNow() { +} + +func (t *ginkgoTestingTProxy) Skipped() bool { + return false +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go new file mode 100644 index 000000000..6739c3f60 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go @@ -0,0 +1,36 @@ +package writer + +type FakeGinkgoWriter struct { + EventStream []string +} + +func NewFake() *FakeGinkgoWriter { + return &FakeGinkgoWriter{ + EventStream: []string{}, + } +} + +func (writer *FakeGinkgoWriter) AddEvent(event string) { + writer.EventStream = append(writer.EventStream, event) +} + +func (writer *FakeGinkgoWriter) Truncate() { + writer.EventStream = append(writer.EventStream, "TRUNCATE") +} + +func (writer *FakeGinkgoWriter) DumpOut() { + writer.EventStream = append(writer.EventStream, "DUMP") +} + +func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) { + writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header) +} + +func (writer *FakeGinkgoWriter) Bytes() []byte { + writer.EventStream = append(writer.EventStream, "BYTES") + return nil +} + +func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) { + return 0, nil +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go new file mode 100644 index 000000000..98eca3bdd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go @@ -0,0 +1,89 @@ +package writer + +import ( + "bytes" + "io" + "sync" +) + +type WriterInterface interface { + io.Writer + + Truncate() + DumpOut() + DumpOutWithHeader(header string) + Bytes() []byte +} + +type Writer struct { + buffer *bytes.Buffer + outWriter io.Writer + lock *sync.Mutex + stream bool + redirector io.Writer +} + +func New(outWriter io.Writer) *Writer { + return &Writer{ + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + stream: true, + } +} + +func (w *Writer) AndRedirectTo(writer io.Writer) { + w.redirector = writer +} + +func (w *Writer) SetStream(stream bool) { + w.lock.Lock() + defer w.lock.Unlock() + w.stream = stream +} + +func (w *Writer) Write(b []byte) (n int, err error) { + w.lock.Lock() + defer w.lock.Unlock() + + n, err = w.buffer.Write(b) + if w.redirector != nil { + w.redirector.Write(b) + } + if w.stream { + return w.outWriter.Write(b) + } + return n, err +} + +func (w *Writer) Truncate() { + w.lock.Lock() + defer w.lock.Unlock() + w.buffer.Reset() +} + +func (w *Writer) DumpOut() { + w.lock.Lock() + defer w.lock.Unlock() + if !w.stream { + w.buffer.WriteTo(w.outWriter) + } +} + +func (w *Writer) Bytes() []byte { + w.lock.Lock() + defer w.lock.Unlock() + b := w.buffer.Bytes() + copied := make([]byte, len(b)) + copy(copied, b) + return copied +} + +func (w *Writer) DumpOutWithHeader(header string) { + w.lock.Lock() + defer w.lock.Unlock() + if !w.stream && w.buffer.Len() > 0 { + w.outWriter.Write([]byte(header)) + w.buffer.WriteTo(w.outWriter) + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go new file mode 100644 index 000000000..ac58dd5f7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go @@ -0,0 +1,84 @@ +/* +Ginkgo's Default Reporter + +A number of command line flags are available 
to tweak Ginkgo's default output. + +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +type DefaultReporter struct { + config config.DefaultReporterConfigType + stenographer stenographer.Stenographer + specSummaries []*types.SpecSummary +} + +func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter { + return &DefaultReporter{ + config: config, + stenographer: stenographer, + } +} + +func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct) + if config.ParallelTotal > 1 { + reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct) + } else { + reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct) + } +} + +func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) + } +} + +func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) + } +} + +func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) { + if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { + reporter.stenographer.AnnounceSpecWillRun(specSummary) + } +} + +func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) { + switch specSummary.State { + case types.SpecStatePassed: + if specSummary.IsMeasurement { + reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct) + } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold { + reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct) + } else { + reporter.stenographer.AnnounceSuccesfulSpec(specSummary) + } + case types.SpecStatePending: + reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct) + case types.SpecStateSkipped: + reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace) + case types.SpecStateTimedOut: + reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStatePanicked: + reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStateFailed: + reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + } + + reporter.specSummaries = append(reporter.specSummaries, specSummary) +} + +func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.stenographer.SummarizeFailures(reporter.specSummaries) + 
reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go new file mode 100644 index 000000000..27db47949 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go @@ -0,0 +1,59 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +//FakeReporter is useful for testing purposes +type FakeReporter struct { + Config config.GinkgoConfigType + + BeginSummary *types.SuiteSummary + BeforeSuiteSummary *types.SetupSummary + SpecWillRunSummaries []*types.SpecSummary + SpecSummaries []*types.SpecSummary + AfterSuiteSummary *types.SetupSummary + EndSummary *types.SuiteSummary + + SpecWillRunStub func(specSummary *types.SpecSummary) + SpecDidCompleteStub func(specSummary *types.SpecSummary) +} + +func NewFakeReporter() *FakeReporter { + return &FakeReporter{ + SpecWillRunSummaries: make([]*types.SpecSummary, 0), + SpecSummaries: make([]*types.SpecSummary, 0), + } +} + +func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + fakeR.Config = config + fakeR.BeginSummary = summary +} + +func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + fakeR.BeforeSuiteSummary = setupSummary +} + +func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) { + if fakeR.SpecWillRunStub != nil { + fakeR.SpecWillRunStub(specSummary) + } + fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary) +} + +func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) { + if fakeR.SpecDidCompleteStub != nil { + fakeR.SpecDidCompleteStub(specSummary) + } + fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary) +} + +func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + fakeR.AfterSuiteSummary = setupSummary +} + +func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + fakeR.EndSummary = summary +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go new file mode 100644 index 000000000..2c9f3c792 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -0,0 +1,152 @@ +/* + +JUnit XML Reporter for Ginkgo + +For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output + +*/ + +package reporters + +import ( + "encoding/xml" + "fmt" + "math" + "os" + "strings" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +type JUnitTestSuite struct { + XMLName xml.Name `xml:"testsuite"` + TestCases []JUnitTestCase `xml:"testcase"` + Name string `xml:"name,attr"` + Tests int `xml:"tests,attr"` + Failures int `xml:"failures,attr"` + Errors int `xml:"errors,attr"` + Time float64 `xml:"time,attr"` +} + +type JUnitTestCase struct { + Name string `xml:"name,attr"` + ClassName string `xml:"classname,attr"` + FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + Time float64 `xml:"time,attr"` + SystemOut string `xml:"system-out,omitempty"` +} + +type JUnitFailureMessage struct { + Type string `xml:"type,attr"` + Message string `xml:",chardata"` +} + +type JUnitSkipped struct { + XMLName xml.Name `xml:"skipped"` +} + +type JUnitReporter struct { + suite JUnitTestSuite + filename string + testSuiteName string +} + +//NewJUnitReporter creates a new 
JUnit XML reporter. The XML will be stored in the passed in filename. +func NewJUnitReporter(filename string) *JUnitReporter { + return &JUnitReporter{ + filename: filename, + } +} + +func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.suite = JUnitTestSuite{ + Name: summary.SuiteDescription, + TestCases: []JUnitTestCase{}, + } + reporter.testSuiteName = summary.SuiteDescription +} + +func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) { +} + +func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("BeforeSuite", setupSummary) +} + +func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("AfterSuite", setupSummary) +} + +func failureMessage(failure types.SpecFailure) string { + return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String()) +} + +func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + testCase := JUnitTestCase{ + Name: name, + ClassName: reporter.testSuiteName, + } + + testCase.FailureMessage = &JUnitFailureMessage{ + Type: reporter.failureTypeForState(setupSummary.State), + Message: failureMessage(setupSummary.Failure), + } + testCase.SystemOut = setupSummary.CapturedOutput + testCase.Time = setupSummary.RunTime.Seconds() + reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) + } +} + +func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { + testCase := JUnitTestCase{ + Name: strings.Join(specSummary.ComponentTexts[1:], " "), + ClassName: reporter.testSuiteName, + } + if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { + testCase.FailureMessage = &JUnitFailureMessage{ + Type: reporter.failureTypeForState(specSummary.State), + Message: failureMessage(specSummary.Failure), + } + testCase.SystemOut = specSummary.CapturedOutput + } + if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { + testCase.Skipped = &JUnitSkipped{} + } + testCase.Time = specSummary.RunTime.Seconds() + reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) +} + +func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun + reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000 + reporter.suite.Failures = summary.NumberOfFailedSpecs + reporter.suite.Errors = 0 + file, err := os.Create(reporter.filename) + if err != nil { + fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error()) + } + defer file.Close() + file.WriteString(xml.Header) + encoder := xml.NewEncoder(file) + encoder.Indent(" ", " ") + err = encoder.Encode(reporter.suite) + if err != nil { + fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error()) + } +} + +func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string { + switch state { + case types.SpecStateFailed: + return "Failure" + case types.SpecStateTimedOut: + return "Timeout" + case types.SpecStatePanicked: + return "Panic" + default: + return "" + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/reporters/reporter.go new file mode 
100644 index 000000000..348b9dfce --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/reporter.go @@ -0,0 +1,15 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +type Reporter interface { + SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SpecSuiteDidEnd(summary *types.SuiteSummary) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go new file mode 100644 index 000000000..45b8f8869 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go @@ -0,0 +1,64 @@ +package stenographer + +import ( + "fmt" + "strings" +) + +func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string { + var out string + + if len(args) > 0 { + out = fmt.Sprintf(format, args...) + } else { + out = format + } + + if s.color { + return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle) + } else { + return out + } +} + +func (s *consoleStenographer) printBanner(text string, bannerCharacter string) { + fmt.Fprintln(s.w, text) + fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text))) +} + +func (s *consoleStenographer) printNewLine() { + fmt.Fprintln(s.w, "") +} + +func (s *consoleStenographer) printDelimiter() { + fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30))) +} + +func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) { + fmt.Fprint(s.w, s.indent(indentation, format, args...)) +} + +func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) { + fmt.Fprintln(s.w, s.indent(indentation, format, args...)) +} + +func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string { + var text string + + if len(args) > 0 { + text = fmt.Sprintf(format, args...) 
+ } else { + text = format + } + + stringArray := strings.Split(text, "\n") + padding := "" + if indentation >= 0 { + padding = strings.Repeat(" ", indentation) + } + for i, s := range stringArray { + stringArray[i] = fmt.Sprintf("%s%s", padding, s) + } + + return strings.Join(stringArray, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go new file mode 100644 index 000000000..98854e7d9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go @@ -0,0 +1,142 @@ +package stenographer + +import ( + "sync" + + "github.com/onsi/ginkgo/types" +) + +func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall { + return FakeStenographerCall{ + Method: method, + Args: args, + } +} + +type FakeStenographer struct { + calls []FakeStenographerCall + lock *sync.Mutex +} + +type FakeStenographerCall struct { + Method string + Args []interface{} +} + +func NewFakeStenographer() *FakeStenographer { + stenographer := &FakeStenographer{ + lock: &sync.Mutex{}, + } + stenographer.Reset() + return stenographer +} + +func (stenographer *FakeStenographer) Calls() []FakeStenographerCall { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + return stenographer.calls +} + +func (stenographer *FakeStenographer) Reset() { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + stenographer.calls = make([]FakeStenographerCall, 0) +} + +func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + results := make([]FakeStenographerCall, 0) + for _, call := range stenographer.calls { + if call.Method == method { + results = append(results, call) + } + } + + return results +} + +func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...)) +} + +func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { + stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct) +} + +func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { + stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct) +} + +func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { + stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct) +} + +func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { + stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct) +} + +func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { + stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { + stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { + stenographer.registerCall("AnnounceSpecWillRun", spec) +} + +func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + 
stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace) +} +func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) { + stenographer.registerCall("AnnounceCapturedOutput", output) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) { + stenographer.registerCall("AnnounceSuccesfulSpec", spec) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { + stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { + stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct) +} + +func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { + stenographer.registerCall("AnnouncePendingSpec", spec, noisy) +} + +func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) { + stenographer.registerCall("SummarizeFailures", summaries) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go new file mode 100644 index 000000000..601c74d66 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go @@ -0,0 +1,572 @@ +/* +The stenographer is used by Ginkgo's reporters to generate output. + +Move along, nothing to see here. 
+*/ + +package stenographer + +import ( + "fmt" + "io" + "runtime" + "strings" + + "github.com/onsi/ginkgo/types" +) + +const defaultStyle = "\x1b[0m" +const boldStyle = "\x1b[1m" +const redColor = "\x1b[91m" +const greenColor = "\x1b[32m" +const yellowColor = "\x1b[33m" +const cyanColor = "\x1b[36m" +const grayColor = "\x1b[90m" +const lightGrayColor = "\x1b[37m" + +type cursorStateType int + +const ( + cursorStateTop cursorStateType = iota + cursorStateStreaming + cursorStateMidBlock + cursorStateEndBlock +) + +type Stenographer interface { + AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) + AnnounceAggregatedParallelRun(nodes int, succinct bool) + AnnounceParallelRun(node int, nodes int, succinct bool) + AnnounceTotalNumberOfSpecs(total int, succinct bool) + AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) + AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) + + AnnounceSpecWillRun(spec *types.SpecSummary) + AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) + AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) + + AnnounceCapturedOutput(output string) + + AnnounceSuccesfulSpec(spec *types.SpecSummary) + AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) + AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) + + AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) + AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) + + AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) + AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) + AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) + + SummarizeFailures(summaries []*types.SpecSummary) +} + +func New(color bool, enableFlakes bool, writer io.Writer) Stenographer { + denoter := "•" + if runtime.GOOS == "windows" { + denoter = "+" + } + return &consoleStenographer{ + color: color, + denoter: denoter, + cursorState: cursorStateTop, + enableFlakes: enableFlakes, + w: writer, + } +} + +type consoleStenographer struct { + color bool + denoter string + cursorState cursorStateType + enableFlakes bool + w io.Writer +} + +var alternatingColors = []string{defaultStyle, grayColor} + +func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { + if succinct { + s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description)) + return + } + s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=") + s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed)) + if randomizingAll { + s.print(0, " - Will randomize all specs") + } + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { + if succinct { + s.print(0, "- node #%d ", node) + return + } + s.println(0, + "Parallel test node %s/%s.", + s.colorize(boldStyle, "%d", node), + s.colorize(boldStyle, "%d", nodes), + ) + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { + if succinct { + s.print(0, "- %d nodes ", nodes) + return + } + s.println(0, + "Running in parallel across %s nodes", + s.colorize(boldStyle, "%d", nodes), + ) + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { + if succinct { + s.print(0, "- %d/%d specs ", specsToRun, total) + s.stream() + return + } + 
s.println(0, + "Will run %s of %s specs", + s.colorize(boldStyle, "%d", specsToRun), + s.colorize(boldStyle, "%d", total), + ) + + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { + if succinct { + s.print(0, "- %d specs ", total) + s.stream() + return + } + s.println(0, + "Will run %s specs", + s.colorize(boldStyle, "%d", total), + ) + + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { + if succinct && summary.SuiteSucceeded { + s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime) + return + } + s.printNewLine() + color := greenColor + if !summary.SuiteSucceeded { + color = redColor + } + s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds())) + + status := "" + if summary.SuiteSucceeded { + status = s.colorize(boldStyle+greenColor, "SUCCESS!") + } else { + status = s.colorize(boldStyle+redColor, "FAIL!") + } + + flakes := "" + if s.enableFlakes { + flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs) + } + + s.print(0, + "%s -- %s | %s | %s | %s\n", + status, + s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs), + s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes, + s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs), + s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs), + ) +} + +func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { + s.startBlock() + for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] { + s.print(0, s.colorize(alternatingColors[i%2], text)+" ") + } + + indentation := 0 + if len(spec.ComponentTexts) > 2 { + indentation = 1 + s.printNewLine() + } + index := len(spec.ComponentTexts) - 1 + s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index])) + s.printNewLine() + s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String())) + s.printNewLine() + s.midBlock() +} + +func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace) +} + +func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.startBlock() + var message string + switch summary.State { + case types.SpecStateFailed: + message = "Failure" + case types.SpecStatePanicked: + message = "Panic" + case types.SpecStateTimedOut: + message = "Timeout" + } + + s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true) + + s.printNewLine() + s.printFailure(indentation, summary.State, summary.Failure, fullTrace) + + s.endBlock() +} + +func (s *consoleStenographer) AnnounceCapturedOutput(output string) { + if output == "" { + return + } + + s.startBlock() + s.println(0, output) + s.midBlock() +} + +func (s *consoleStenographer) 
AnnounceSuccesfulSpec(spec *types.SpecSummary) { + s.print(0, s.colorize(greenColor, s.denoter)) + s.stream() +} + +func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { + s.printBlockWithMessage( + s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()), + "", + spec, + succinct, + ) +} + +func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { + s.printBlockWithMessage( + s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter), + s.measurementReport(spec, succinct), + spec, + succinct, + ) +} + +func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { + if noisy { + s.printBlockWithMessage( + s.colorize(yellowColor, "P [PENDING]"), + "", + spec, + false, + ) + } else { + s.print(0, s.colorize(yellowColor, "P")) + s.stream() + } +} + +func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { + // Skips at runtime will have a non-empty spec.Failure. All others should be succinct. + if succinct || spec.Failure == (types.SpecFailure{}) { + s.print(0, s.colorize(cyanColor, "S")) + s.stream() + } else { + s.startBlock() + s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) + + s.printNewLine() + s.printSkip(indentation, spec.Failure) + s.endBlock() + } +} + +func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) { + failingSpecs := []*types.SpecSummary{} + + for _, summary := range summaries { + if summary.HasFailureState() { + failingSpecs = append(failingSpecs, summary) + } + } + + if len(failingSpecs) == 0 { + return + } + + s.printNewLine() + s.printNewLine() + plural := "s" + if len(failingSpecs) == 1 { + plural = "" + } + s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural)) + for _, summary := range failingSpecs { + s.printNewLine() + if summary.HasFailureState() { + if summary.TimedOut() { + s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] ")) + } else if summary.Panicked() { + s.print(0, s.colorize(redColor+boldStyle, "[Panic!] 
")) + } else if summary.Failed() { + s.print(0, s.colorize(redColor+boldStyle, "[Fail] ")) + } + s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true) + s.printNewLine() + s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String())) + } + } +} + +func (s *consoleStenographer) startBlock() { + if s.cursorState == cursorStateStreaming { + s.printNewLine() + s.printDelimiter() + } else if s.cursorState == cursorStateMidBlock { + s.printNewLine() + } +} + +func (s *consoleStenographer) midBlock() { + s.cursorState = cursorStateMidBlock +} + +func (s *consoleStenographer) endBlock() { + s.printDelimiter() + s.cursorState = cursorStateEndBlock +} + +func (s *consoleStenographer) stream() { + s.cursorState = cursorStateStreaming +} + +func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) { + s.startBlock() + s.println(0, header) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct) + + if message != "" { + s.printNewLine() + s.println(indentation, message) + } + + s.endBlock() +} + +func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.startBlock() + s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) + + s.printNewLine() + s.printFailure(indentation, spec.State, spec.Failure, fullTrace) + s.endBlock() +} + +func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string { + switch failedComponentType { + case types.SpecComponentTypeBeforeSuite: + return " in Suite Setup (BeforeSuite)" + case types.SpecComponentTypeAfterSuite: + return " in Suite Teardown (AfterSuite)" + case types.SpecComponentTypeBeforeEach: + return " in Spec Setup (BeforeEach)" + case types.SpecComponentTypeJustBeforeEach: + return " in Spec Setup (JustBeforeEach)" + case types.SpecComponentTypeAfterEach: + return " in Spec Teardown (AfterEach)" + } + + return "" +} + +func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) { + s.println(indentation, s.colorize(cyanColor, spec.Message)) + s.printNewLine() + s.println(indentation, spec.Location.String()) +} + +func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) { + if state == types.SpecStatePanicked { + s.println(indentation, s.colorize(redColor+boldStyle, failure.Message)) + s.println(indentation, s.colorize(redColor, failure.ForwardedPanic)) + s.println(indentation, failure.Location.String()) + s.printNewLine() + s.println(indentation, s.colorize(redColor, "Full Stack Trace")) + s.println(indentation, failure.Location.FullStackTrace) + } else { + s.println(indentation, s.colorize(redColor, failure.Message)) + s.printNewLine() + s.println(indentation, failure.Location.String()) + if fullTrace { + s.printNewLine() + s.println(indentation, s.colorize(redColor, "Full Stack Trace")) + s.println(indentation, failure.Location.FullStackTrace) + } + } +} + +func (s *consoleStenographer) printSpecContext(componentTexts []string, 
componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { + startIndex := 1 + indentation := 0 + + if len(componentTexts) == 1 { + startIndex = 0 + } + + for i := startIndex; i < len(componentTexts); i++ { + if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex { + color := redColor + if state == types.SpecStateSkipped { + color = cyanColor + } + blockType := "" + switch failedComponentType { + case types.SpecComponentTypeBeforeSuite: + blockType = "BeforeSuite" + case types.SpecComponentTypeAfterSuite: + blockType = "AfterSuite" + case types.SpecComponentTypeBeforeEach: + blockType = "BeforeEach" + case types.SpecComponentTypeJustBeforeEach: + blockType = "JustBeforeEach" + case types.SpecComponentTypeAfterEach: + blockType = "AfterEach" + case types.SpecComponentTypeIt: + blockType = "It" + case types.SpecComponentTypeMeasure: + blockType = "Measurement" + } + if succinct { + s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i])) + } else { + s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType)) + s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) + } + } else { + if succinct { + s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i])) + } else { + s.println(indentation, componentTexts[i]) + s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) + } + } + indentation++ + } + + return indentation +} + +func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { + indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct) + + if succinct { + if len(componentTexts) > 0 { + s.printNewLine() + s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1])) + } + s.printNewLine() + indentation = 1 + } else { + indentation-- + } + + return indentation +} + +func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string { + orderedKeys := make([]string, len(measurements)) + for key, measurement := range measurements { + orderedKeys[measurement.Order] = key + } + return orderedKeys +} + +func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string { + if len(spec.Measurements) == 0 { + return "Found no measurements" + } + + message := []string{} + orderedKeys := s.orderedMeasurementKeys(spec.Measurements) + + if succinct { + message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) + for _, key := range orderedKeys { + measurement := spec.Measurements[key] + message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s", + s.colorize(boldStyle, "%s", measurement.Name), + measurement.SmallestLabel, + s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest), + measurement.Units, + measurement.AverageLabel, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average), + measurement.Units, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation), + measurement.Units, + measurement.LargestLabel, + s.colorize(redColor, measurement.PrecisionFmt(), 
measurement.Largest), + measurement.Units, + )) + } + } else { + message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) + for _, key := range orderedKeys { + measurement := spec.Measurements[key] + info := "" + if measurement.Info != nil { + message = append(message, fmt.Sprintf("%v", measurement.Info)) + } + + message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s", + s.colorize(boldStyle, "%s", measurement.Name), + info, + measurement.SmallestLabel, + s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest), + measurement.Units, + measurement.LargestLabel, + s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest), + measurement.Units, + measurement.AverageLabel, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average), + measurement.Units, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation), + measurement.Units, + )) + } + } + + return strings.Join(message, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
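A minimal sketch of how a Ginkgo reporter might drive the vendored stenographer package above, shown here only for orientation. It uses the console implementation returned by stenographer.New and the FakeStenographer defined earlier, which records calls instead of printing and is what reporter tests typically assert against. The import path matches the vendored location; the suite name, seed, and spec counts are illustrative values, not anything taken from this change.

package main

import (
	"fmt"
	"os"

	"github.com/onsi/ginkgo/reporters/stenographer"
)

func main() {
	// Console stenographer: color enabled, flake reporting disabled,
	// writing to stdout (the same constructor Ginkgo's default reporter uses).
	s := stenographer.New(true, false, os.Stdout)
	s.AnnounceSuite("Example Suite", 1234, false, false) // banner + random seed
	s.AnnounceNumberOfSpecs(3, 5, false)                 // "Will run 3 of 5 specs"

	// FakeStenographer: records each call with its arguments so a test can
	// verify what a reporter announced without parsing terminal output.
	fake := stenographer.NewFakeStenographer()
	fake.AnnounceSuite("Example Suite", 1234, false, false)
	fmt.Println(len(fake.CallsTo("AnnounceSuite"))) // 1
}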
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go new file mode 100644 index 000000000..52d6653b3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go @@ -0,0 +1,24 @@ +// +build !windows + +package colorable + +import ( + "io" + "os" +) + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +func NewColorableStdout() io.Writer { + return os.Stdout +} + +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go new file mode 100644 index 000000000..108800923 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go @@ -0,0 +1,783 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") +) + +type Writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { + return file + } +} + +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 
0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 
234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + 
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2) + csbi.cursorPosition.x = short(n1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i += 1 { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. 
+ attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable 
[]consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go new file mode 100644 index 000000000..fb976dbd8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" +) + +type NonColorable struct { + out io.Writer + lastbuf bytes.Buffer +} + +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewBuffer(data) +loop: + for { + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + return len(data) - w.lastbuf.Len(), nil +} diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE similarity index 51% rename from vendor/github.com/konsorten/go-windows-terminal-sequences/README.md rename to vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE index 195333e51..65dc692b6 100644 --- a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE @@ -1,41 +1,9 @@ -# Windows Terminal Sequences +Copyright (c) Yasuhiro MATSUMOTO -This library allow for enabling Windows terminal color support for Go. +MIT License (Expat) -See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details. - -## Usage - -```go -import ( - "syscall" - - sequences "github.com/konsorten/go-windows-terminal-sequences" -) - -func main() { - sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true) -} - -``` - -## Authors - -The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de). 
- -We thank all the authors who provided code to this library: - -* Felix Kollmann -* Nicolas Perraut - -## License - -(The MIT License) - -Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go new file mode 100644 index 000000000..17d4f90eb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go new file mode 100644 index 000000000..83c588773 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go @@ -0,0 +1,9 @@ +// +build appengine + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on on appengine classic which is a sandboxed PaaS. 
+func IsTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go new file mode 100644 index 000000000..98ffe86a4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go new file mode 100644 index 000000000..9d24bac1d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go new file mode 100644 index 000000000..1f0c6bf53 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go new file mode 100644 index 000000000..83c398b16 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go @@ -0,0 +1,19 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") +var procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + +// IsTerminal return true if the file descriptor is terminal. 
+func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go new file mode 100644 index 000000000..36ee2a600 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go @@ -0,0 +1,93 @@ +/* + +TeamCity Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +const ( + messageId = "##teamcity" +) + +type TeamCityReporter struct { + writer io.Writer + testSuiteName string +} + +func NewTeamCityReporter(writer io.Writer) *TeamCityReporter { + return &TeamCityReporter{ + writer: writer, + } +} + +func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.testSuiteName = escape(summary.SuiteDescription) + fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName) +} + +func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("BeforeSuite", setupSummary) +} + +func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("AfterSuite", setupSummary) +} + +func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + testName := escape(name) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) + message := escape(setupSummary.Failure.ComponentCodeLocation.String()) + details := escape(setupSummary.Failure.Message) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000 + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) + } +} + +func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) { + testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) +} + +func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) { + testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) + + if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { + message := escape(specSummary.Failure.ComponentCodeLocation.String()) + details := escape(specSummary.Failure.Message) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + } + if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { + fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName) + } + + durationInMilliseconds := specSummary.RunTime.Seconds() * 1000 + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) +} + +func (reporter 
*TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName) +} + +func escape(output string) string { + output = strings.Replace(output, "|", "||", -1) + output = strings.Replace(output, "'", "|'", -1) + output = strings.Replace(output, "\n", "|n", -1) + output = strings.Replace(output, "\r", "|r", -1) + output = strings.Replace(output, "[", "|[", -1) + output = strings.Replace(output, "]", "|]", -1) + return output +} diff --git a/vendor/github.com/onsi/ginkgo/types/code_location.go b/vendor/github.com/onsi/ginkgo/types/code_location.go new file mode 100644 index 000000000..935a89e13 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/code_location.go @@ -0,0 +1,15 @@ +package types + +import ( + "fmt" +) + +type CodeLocation struct { + FileName string + LineNumber int + FullStackTrace string +} + +func (codeLocation CodeLocation) String() string { + return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) +} diff --git a/vendor/github.com/onsi/ginkgo/types/synchronization.go b/vendor/github.com/onsi/ginkgo/types/synchronization.go new file mode 100644 index 000000000..fdd6ed5bd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/synchronization.go @@ -0,0 +1,30 @@ +package types + +import ( + "encoding/json" +) + +type RemoteBeforeSuiteState int + +const ( + RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota + + RemoteBeforeSuiteStatePending + RemoteBeforeSuiteStatePassed + RemoteBeforeSuiteStateFailed + RemoteBeforeSuiteStateDisappeared +) + +type RemoteBeforeSuiteData struct { + Data []byte + State RemoteBeforeSuiteState +} + +func (r RemoteBeforeSuiteData) ToJSON() []byte { + data, _ := json.Marshal(r) + return data +} + +type RemoteAfterSuiteData struct { + CanRun bool +} diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go new file mode 100644 index 000000000..0e89521be --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/types.go @@ -0,0 +1,174 @@ +package types + +import ( + "strconv" + "time" +) + +const GINKGO_FOCUS_EXIT_CODE = 197 + +/* +SuiteSummary represents the a summary of the test suite and is passed to both +Reporter.SpecSuiteWillBegin +Reporter.SpecSuiteDidEnd + +this is unfortunate as these two methods should receive different objects. When running in parallel +each node does not deterministically know how many specs it will end up running. + +Unfortunately making such a change would break backward compatibility. + +Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unkown fields +with -1. +*/ +type SuiteSummary struct { + SuiteDescription string + SuiteSucceeded bool + SuiteID string + + NumberOfSpecsBeforeParallelization int + NumberOfTotalSpecs int + NumberOfSpecsThatWillBeRun int + NumberOfPendingSpecs int + NumberOfSkippedSpecs int + NumberOfPassedSpecs int + NumberOfFailedSpecs int + // Flaked specs are those that failed initially, but then passed on a + // subsequent try. 
+ NumberOfFlakedSpecs int + RunTime time.Duration +} + +type SpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*SpecMeasurement + + CapturedOutput string + SuiteID string +} + +func (s SpecSummary) HasFailureState() bool { + return s.State.IsFailure() +} + +func (s SpecSummary) TimedOut() bool { + return s.State == SpecStateTimedOut +} + +func (s SpecSummary) Panicked() bool { + return s.State == SpecStatePanicked +} + +func (s SpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s SpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s SpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s SpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type SetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type SpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type SpecMeasurement struct { + Name string + Info interface{} + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string + Precision int +} + +func (s SpecMeasurement) PrecisionFmt() string { + if s.Precision == 0 { + return "%f" + } + + str := strconv.Itoa(s.Precision) + + return "%." + str + "f" +} + +type SpecState uint + +const ( + SpecStateInvalid SpecState = iota + + SpecStatePending + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStatePanicked + SpecStateTimedOut +) + +func (state SpecState) IsFailure() bool { + return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed +} + +type SpecComponentType uint + +const ( + SpecComponentTypeInvalid SpecComponentType = iota + + SpecComponentTypeContainer + SpecComponentTypeBeforeSuite + SpecComponentTypeAfterSuite + SpecComponentTypeBeforeEach + SpecComponentTypeJustBeforeEach + SpecComponentTypeJustAfterEach + SpecComponentTypeAfterEach + SpecComponentTypeIt + SpecComponentTypeMeasure +) + +type FlagType uint + +const ( + FlagTypeNone FlagType = iota + FlagTypeFocused + FlagTypePending +) diff --git a/vendor/github.com/onsi/gomega/LICENSE b/vendor/github.com/onsi/gomega/LICENSE new file mode 100644 index 000000000..9415ee72c --- /dev/null +++ b/vendor/github.com/onsi/gomega/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go new file mode 100644 index 000000000..6559525f1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -0,0 +1,382 @@ +/* +Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information. +*/ +package format + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects +var MaxDepth = uint(10) + +/* +By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output. + +Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead. + +Note that GoString and String don't always have all the information you need to understand why a test failed! +*/ +var UseStringerRepresentation = false + +/* +Print the content of context objects. By default it will be suppressed. + +Set PrintContextObjects = true to enable printing of the context internals. +*/ +var PrintContextObjects = false + +// TruncatedDiff choose if we should display a truncated pretty diff or not +var TruncatedDiff = true + +// Ctx interface defined here to keep backwards compatability with go < 1.7 +// It matches the context.Context interface +type Ctx interface { + Deadline() (deadline time.Time, ok bool) + Done() <-chan struct{} + Err() error + Value(key interface{}) interface{} +} + +var contextType = reflect.TypeOf((*Ctx)(nil)).Elem() +var timeType = reflect.TypeOf(time.Time{}) + +//The default indentation string emitted by the format package +var Indent = " " + +var longFormThreshold = 20 + +/* +Generates a formatted matcher success/failure message of the form: + + Expected + + + + +If expected is omited, then the message looks like: + + Expected + + +*/ +func Message(actual interface{}, message string, expected ...interface{}) string { + if len(expected) == 0 { + return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message) + } + return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1)) +} + +/* + +Generates a nicely formatted matcher success / failure message + +Much like Message(...), but it attempts to pretty print diffs in strings + +Expected + : "...aaaaabaaaaa..." +to equal | + : "...aaaaazaaaaa..." 
+ +*/ + +func MessageWithDiff(actual, message, expected string) string { + if TruncatedDiff && len(actual) >= truncateThreshold && len(expected) >= truncateThreshold { + diffPoint := findFirstMismatch(actual, expected) + formattedActual := truncateAndFormat(actual, diffPoint) + formattedExpected := truncateAndFormat(expected, diffPoint) + + spacesBeforeFormattedMismatch := findFirstMismatch(formattedActual, formattedExpected) + + tabLength := 4 + spaceFromMessageToActual := tabLength + len(": ") - len(message) + padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|" + return Message(formattedActual, message+padding, formattedExpected) + } + return Message(actual, message, expected) +} + +func truncateAndFormat(str string, index int) string { + leftPadding := `...` + rightPadding := `...` + + start := index - charactersAroundMismatchToInclude + if start < 0 { + start = 0 + leftPadding = "" + } + + // slice index must include the mis-matched character + lengthOfMismatchedCharacter := 1 + end := index + charactersAroundMismatchToInclude + lengthOfMismatchedCharacter + if end > len(str) { + end = len(str) + rightPadding = "" + + } + return fmt.Sprintf("\"%s\"", leftPadding+str[start:end]+rightPadding) +} + +func findFirstMismatch(a, b string) int { + aSlice := strings.Split(a, "") + bSlice := strings.Split(b, "") + + for index, str := range aSlice { + if index > len(bSlice)-1 { + return index + } + if str != bSlice[index] { + return index + } + } + + if len(b) > len(a) { + return len(a) + 1 + } + + return 0 +} + +const ( + truncateThreshold = 50 + charactersAroundMismatchToInclude = 5 +) + +/* +Pretty prints the passed in object at the passed in indentation level. + +Object recurses into deeply nested objects emitting pretty-printed representations of their components. + +Modify format.MaxDepth to control how deep the recursion is allowed to go +Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of +recursing into the object. + +Set PrintContextObjects to true to print the content of objects implementing context.Context +*/ +func Object(object interface{}, indentation uint) string { + indent := strings.Repeat(Indent, int(indentation)) + value := reflect.ValueOf(object) + return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation)) +} + +/* +IndentString takes a string and indents each line by the specified amount. +*/ +func IndentString(s string, indentation uint) string { + components := strings.Split(s, "\n") + result := "" + indent := strings.Repeat(Indent, int(indentation)) + for i, component := range components { + result += indent + component + if i < len(components)-1 { + result += "\n" + } + } + + return result +} + +func formatType(object interface{}) string { + t := reflect.TypeOf(object) + if t == nil { + return "nil" + } + switch t.Kind() { + case reflect.Chan: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + case reflect.Ptr: + return fmt.Sprintf("%T | %p", object, object) + case reflect.Slice: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + case reflect.Map: + v := reflect.ValueOf(object) + return fmt.Sprintf("%T | len:%d", object, v.Len()) + default: + return fmt.Sprintf("%T", object) + } +} + +func formatValue(value reflect.Value, indentation uint) string { + if indentation > MaxDepth { + return "..." 
+ } + + if isNilValue(value) { + return "nil" + } + + if UseStringerRepresentation { + if value.CanInterface() { + obj := value.Interface() + switch x := obj.(type) { + case fmt.GoStringer: + return x.GoString() + case fmt.Stringer: + return x.String() + } + } + } + + if !PrintContextObjects { + if value.Type().Implements(contextType) && indentation > 1 { + return "" + } + } + + switch value.Kind() { + case reflect.Bool: + return fmt.Sprintf("%v", value.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fmt.Sprintf("%v", value.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fmt.Sprintf("%v", value.Uint()) + case reflect.Uintptr: + return fmt.Sprintf("0x%x", value.Uint()) + case reflect.Float32, reflect.Float64: + return fmt.Sprintf("%v", value.Float()) + case reflect.Complex64, reflect.Complex128: + return fmt.Sprintf("%v", value.Complex()) + case reflect.Chan: + return fmt.Sprintf("0x%x", value.Pointer()) + case reflect.Func: + return fmt.Sprintf("0x%x", value.Pointer()) + case reflect.Ptr: + return formatValue(value.Elem(), indentation) + case reflect.Slice: + return formatSlice(value, indentation) + case reflect.String: + return formatString(value.String(), indentation) + case reflect.Array: + return formatSlice(value, indentation) + case reflect.Map: + return formatMap(value, indentation) + case reflect.Struct: + if value.Type() == timeType && value.CanInterface() { + t, _ := value.Interface().(time.Time) + return t.Format(time.RFC3339Nano) + } + return formatStruct(value, indentation) + case reflect.Interface: + return formatValue(value.Elem(), indentation) + default: + if value.CanInterface() { + return fmt.Sprintf("%#v", value.Interface()) + } + return fmt.Sprintf("%#v", value) + } +} + +func formatString(object interface{}, indentation uint) string { + if indentation == 1 { + s := fmt.Sprintf("%s", object) + components := strings.Split(s, "\n") + result := "" + for i, component := range components { + if i == 0 { + result += component + } else { + result += Indent + component + } + if i < len(components)-1 { + result += "\n" + } + } + + return fmt.Sprintf("%s", result) + } else { + return fmt.Sprintf("%q", object) + } +} + +func formatSlice(v reflect.Value, indentation uint) string { + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && isPrintableString(string(v.Bytes())) { + return formatString(v.Bytes(), indentation) + } + + l := v.Len() + result := make([]string, l) + longest := 0 + for i := 0; i < l; i++ { + result[i] = formatValue(v.Index(i), indentation+1) + if len(result[i]) > longest { + longest = len(result[i]) + } + } + + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } + return fmt.Sprintf("[%s]", strings.Join(result, ", ")) +} + +func formatMap(v reflect.Value, indentation uint) string { + l := v.Len() + result := make([]string, l) + + longest := 0 + for i, key := range v.MapKeys() { + value := v.MapIndex(key) + result[i] = fmt.Sprintf("%s: %s", formatValue(key, indentation+1), formatValue(value, indentation+1)) + if len(result[i]) > longest { + longest = len(result[i]) + } + } + + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } + return 
fmt.Sprintf("{%s}", strings.Join(result, ", ")) +} + +func formatStruct(v reflect.Value, indentation uint) string { + t := v.Type() + + l := v.NumField() + result := []string{} + longest := 0 + for i := 0; i < l; i++ { + structField := t.Field(i) + fieldEntry := v.Field(i) + representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1)) + result = append(result, representation) + if len(representation) > longest { + longest = len(representation) + } + } + if longest > longFormThreshold { + indenter := strings.Repeat(Indent, int(indentation)) + return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) + } + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) +} + +func isNilValue(a reflect.Value) bool { + switch a.Kind() { + case reflect.Invalid: + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return a.IsNil() + } + + return false +} + +/* +Returns true when the string is entirely made of printable runes, false otherwise. +*/ +func isPrintableString(str string) bool { + for _, runeValue := range str { + if !strconv.IsPrint(runeValue) { + return false + } + } + return true +} diff --git a/vendor/github.com/onsi/gomega/gbytes/buffer.go b/vendor/github.com/onsi/gomega/gbytes/buffer.go new file mode 100644 index 000000000..336086f4a --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/buffer.go @@ -0,0 +1,245 @@ +/* +Package gbytes provides a buffer that supports incrementally detecting input. + +You use gbytes.Buffer with the gbytes.Say matcher. When Say finds a match, it fastforwards the buffer's read cursor to the end of that match. + +Subsequent matches against the buffer will only operate against data that appears *after* the read cursor. + +The read cursor is an opaque implementation detail that you cannot access. You should use the Say matcher to sift through the buffer. You can always +access the entire buffer's contents with Contents(). + +*/ +package gbytes + +import ( + "errors" + "fmt" + "io" + "regexp" + "sync" + "time" +) + +/* +gbytes.Buffer implements an io.Writer and can be used with the gbytes.Say matcher. + +You should only use a gbytes.Buffer in test code. It stores all writes in an in-memory buffer - behavior that is inappropriate for production code! +*/ +type Buffer struct { + contents []byte + readCursor uint64 + lock *sync.Mutex + detectCloser chan interface{} + closed bool +} + +/* +NewBuffer returns a new gbytes.Buffer +*/ +func NewBuffer() *Buffer { + return &Buffer{ + lock: &sync.Mutex{}, + } +} + +/* +BufferWithBytes returns a new gbytes.Buffer seeded with the passed in bytes +*/ +func BufferWithBytes(bytes []byte) *Buffer { + return &Buffer{ + lock: &sync.Mutex{}, + contents: bytes, + } +} + +/* +BufferReader returns a new gbytes.Buffer that wraps a reader. The reader's contents are read into +the Buffer via io.Copy +*/ +func BufferReader(reader io.Reader) *Buffer { + b := &Buffer{ + lock: &sync.Mutex{}, + } + + go func() { + io.Copy(b, reader) + b.Close() + }() + + return b +} + +/* +Write implements the io.Writer interface +*/ +func (b *Buffer) Write(p []byte) (n int, err error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.closed { + return 0, errors.New("attempt to write to closed buffer") + } + + b.contents = append(b.contents, p...) + return len(p), nil +} + +/* +Read implements the io.Reader interface. It advances the +cursor as it reads. + +Returns an error if called after Close. 
+*/ +func (b *Buffer) Read(d []byte) (int, error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.closed { + return 0, errors.New("attempt to read from closed buffer") + } + + if uint64(len(b.contents)) <= b.readCursor { + return 0, io.EOF + } + + n := copy(d, b.contents[b.readCursor:]) + b.readCursor += uint64(n) + + return n, nil +} + +/* +Close signifies that the buffer will no longer be written to +*/ +func (b *Buffer) Close() error { + b.lock.Lock() + defer b.lock.Unlock() + + b.closed = true + + return nil +} + +/* +Closed returns true if the buffer has been closed +*/ +func (b *Buffer) Closed() bool { + b.lock.Lock() + defer b.lock.Unlock() + + return b.closed +} + +/* +Contents returns all data ever written to the buffer. +*/ +func (b *Buffer) Contents() []byte { + b.lock.Lock() + defer b.lock.Unlock() + + contents := make([]byte, len(b.contents)) + copy(contents, b.contents) + return contents +} + +/* +Detect takes a regular expression and returns a channel. + +The channel will receive true the first time data matching the regular expression is written to the buffer. +The channel is subsequently closed and the buffer's read-cursor is fast-forwarded to just after the matching region. + +You typically don't need to use Detect and should use the ghttp.Say matcher instead. Detect is useful, however, in cases where your code must +be branch and handle different outputs written to the buffer. + +For example, consider a buffer hooked up to the stdout of a client library. You may (or may not, depending on state outside of your control) need to authenticate the client library. + +You could do something like: + +select { +case <-buffer.Detect("You are not logged in"): + //log in +case <-buffer.Detect("Success"): + //carry on +case <-time.After(time.Second): + //welp +} +buffer.CancelDetects() + +You should always call CancelDetects after using Detect. This will close any channels that have not detected and clean up the goroutines that were spawned to support them. + +Finally, you can pass detect a format string followed by variadic arguments. This will construct the regexp using fmt.Sprintf. +*/ +func (b *Buffer) Detect(desired string, args ...interface{}) chan bool { + formattedRegexp := desired + if len(args) > 0 { + formattedRegexp = fmt.Sprintf(desired, args...) + } + re := regexp.MustCompile(formattedRegexp) + + b.lock.Lock() + defer b.lock.Unlock() + + if b.detectCloser == nil { + b.detectCloser = make(chan interface{}) + } + + closer := b.detectCloser + response := make(chan bool) + go func() { + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + defer close(response) + for { + select { + case <-ticker.C: + b.lock.Lock() + data, cursor := b.contents[b.readCursor:], b.readCursor + loc := re.FindIndex(data) + b.lock.Unlock() + + if loc != nil { + response <- true + b.lock.Lock() + newCursorPosition := cursor + uint64(loc[1]) + if newCursorPosition >= b.readCursor { + b.readCursor = newCursorPosition + } + b.lock.Unlock() + return + } + case <-closer: + return + } + } + }() + + return response +} + +/* +CancelDetects cancels any pending detects and cleans up their goroutines. You should always call this when you're done with a set of Detect channels. 
+*/ +func (b *Buffer) CancelDetects() { + b.lock.Lock() + defer b.lock.Unlock() + + close(b.detectCloser) + b.detectCloser = nil +} + +func (b *Buffer) didSay(re *regexp.Regexp) (bool, []byte) { + b.lock.Lock() + defer b.lock.Unlock() + + unreadBytes := b.contents[b.readCursor:] + copyOfUnreadBytes := make([]byte, len(unreadBytes)) + copy(copyOfUnreadBytes, unreadBytes) + + loc := re.FindIndex(unreadBytes) + + if loc != nil { + b.readCursor += uint64(loc[1]) + return true, copyOfUnreadBytes + } + return false, copyOfUnreadBytes +} diff --git a/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go b/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go new file mode 100644 index 000000000..3caed8769 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/io_wrappers.go @@ -0,0 +1,85 @@ +package gbytes + +import ( + "errors" + "io" + "time" +) + +// ErrTimeout is returned by TimeoutCloser, TimeoutReader, and TimeoutWriter when the underlying Closer/Reader/Writer does not return within the specified timeout +var ErrTimeout = errors.New("timeout occurred") + +// TimeoutCloser returns an io.Closer that wraps the passed-in io.Closer. If the underlying Closer fails to close within the alloted timeout ErrTimeout is returned. +func TimeoutCloser(c io.Closer, timeout time.Duration) io.Closer { + return timeoutReaderWriterCloser{c: c, d: timeout} +} + +// TimeoutReader returns an io.Reader that wraps the passed-in io.Reader. If the underlying Reader fails to read within the alloted timeout ErrTimeout is returned. +func TimeoutReader(r io.Reader, timeout time.Duration) io.Reader { + return timeoutReaderWriterCloser{r: r, d: timeout} +} + +// TimeoutWriter returns an io.Writer that wraps the passed-in io.Writer. If the underlying Writer fails to write within the alloted timeout ErrTimeout is returned. +func TimeoutWriter(w io.Writer, timeout time.Duration) io.Writer { + return timeoutReaderWriterCloser{w: w, d: timeout} +} + +type timeoutReaderWriterCloser struct { + c io.Closer + w io.Writer + r io.Reader + d time.Duration +} + +func (t timeoutReaderWriterCloser) Close() error { + done := make(chan struct{}) + var err error + + go func() { + err = t.c.Close() + close(done) + }() + + select { + case <-done: + return err + case <-time.After(t.d): + return ErrTimeout + } +} + +func (t timeoutReaderWriterCloser) Read(p []byte) (int, error) { + done := make(chan struct{}) + var n int + var err error + + go func() { + n, err = t.r.Read(p) + close(done) + }() + + select { + case <-done: + return n, err + case <-time.After(t.d): + return 0, ErrTimeout + } +} + +func (t timeoutReaderWriterCloser) Write(p []byte) (int, error) { + done := make(chan struct{}) + var n int + var err error + + go func() { + n, err = t.w.Write(p) + close(done) + }() + + select { + case <-done: + return n, err + case <-time.After(t.d): + return 0, ErrTimeout + } +} diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go new file mode 100644 index 000000000..14317182b --- /dev/null +++ b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go @@ -0,0 +1,104 @@ +package gbytes + +import ( + "fmt" + "regexp" + + "github.com/onsi/gomega/format" +) + +//Objects satisfying the BufferProvider can be used with the Say matcher. +type BufferProvider interface { + Buffer() *Buffer +} + +/* +Say is a Gomega matcher that operates on gbytes.Buffers: + + Expect(buffer).Should(Say("something")) + +will succeed if the unread portion of the buffer matches the regular expression "something". 
+ +When Say succeeds, it fast forwards the gbytes.Buffer's read cursor to just after the succesful match. +Thus, subsequent calls to Say will only match against the unread portion of the buffer + +Say pairs very well with Eventually. To assert that a buffer eventually receives data matching "[123]-star" within 3 seconds you can: + + Eventually(buffer, 3).Should(Say("[123]-star")) + +Ditto with consistently. To assert that a buffer does not receive data matching "never-see-this" for 1 second you can: + + Consistently(buffer, 1).ShouldNot(Say("never-see-this")) + +In addition to bytes.Buffers, Say can operate on objects that implement the gbytes.BufferProvider interface. +In such cases, Say simply operates on the *gbytes.Buffer returned by Buffer() + +If the buffer is closed, the Say matcher will tell Eventually to abort. +*/ +func Say(expected string, args ...interface{}) *sayMatcher { + if len(args) > 0 { + expected = fmt.Sprintf(expected, args...) + } + return &sayMatcher{ + re: regexp.MustCompile(expected), + } +} + +type sayMatcher struct { + re *regexp.Regexp + receivedSayings []byte +} + +func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) { + var buffer *Buffer + + switch x := actual.(type) { + case *Buffer: + buffer = x + case BufferProvider: + buffer = x.Buffer() + default: + return nil, false + } + + return buffer, true +} + +func (m *sayMatcher) Match(actual interface{}) (success bool, err error) { + buffer, ok := m.buffer(actual) + if !ok { + return false, fmt.Errorf("Say must be passed a *gbytes.Buffer or BufferProvider. Got:\n%s", format.Object(actual, 1)) + } + + didSay, sayings := buffer.didSay(m.re) + m.receivedSayings = sayings + + return didSay, nil +} + +func (m *sayMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf( + "Got stuck at:\n%s\nWaiting for:\n%s", + format.IndentString(string(m.receivedSayings), 1), + format.IndentString(m.re.String(), 1), + ) +} + +func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf( + "Saw:\n%s\nWhich matches the unexpected:\n%s", + format.IndentString(string(m.receivedSayings), 1), + format.IndentString(m.re.String(), 1), + ) +} + +func (m *sayMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + switch x := actual.(type) { + case *Buffer: + return !x.Closed() + case BufferProvider: + return !x.Buffer().Closed() + default: + return true + } +} diff --git a/vendor/github.com/onsi/gomega/gexec/build.go b/vendor/github.com/onsi/gomega/gexec/build.go new file mode 100644 index 000000000..869c1ead8 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/build.go @@ -0,0 +1,112 @@ +package gexec + +import ( + "errors" + "fmt" + "go/build" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" + "sync" +) + +var ( + mu sync.Mutex + tmpDir string +) + +/* +Build uses go build to compile the package at packagePath. The resulting binary is saved off in a temporary directory. +A path pointing to this binary is returned. + +Build uses the $GOPATH set in your environment. If $GOPATH is not set and you are using Go 1.8+, +it will use the default GOPATH instead. It passes the variadic args on to `go build`. +*/ +func Build(packagePath string, args ...string) (compiledPath string, err error) { + return doBuild(build.Default.GOPATH, packagePath, nil, args...) +} + +/* +BuildWithEnvironment is identical to Build but allows you to specify env vars to be set at build time. 
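+
+A hedged example (the package path and environment variable below are
+placeholders used only for illustration):
+
+	binPath, err := gexec.BuildWithEnvironment(
+		"github.com/example/project/cmd/server",
+		[]string{"CGO_ENABLED=0"},
+	)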
+*/ +func BuildWithEnvironment(packagePath string, env []string, args ...string) (compiledPath string, err error) { + return doBuild(build.Default.GOPATH, packagePath, env, args...) +} + +/* +BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument). +*/ +func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) { + return doBuild(gopath, packagePath, nil, args...) +} + +func replaceGoPath(environ []string, newGoPath string) []string { + newEnviron := []string{} + for _, v := range environ { + if !strings.HasPrefix(v, "GOPATH=") { + newEnviron = append(newEnviron, v) + } + } + return append(newEnviron, "GOPATH="+newGoPath) +} + +func doBuild(gopath, packagePath string, env []string, args ...string) (compiledPath string, err error) { + tmpDir, err := temporaryDirectory() + if err != nil { + return "", err + } + + if len(gopath) == 0 { + return "", errors.New("$GOPATH not provided when building " + packagePath) + } + + executable := filepath.Join(tmpDir, path.Base(packagePath)) + if runtime.GOOS == "windows" { + executable = executable + ".exe" + } + + cmdArgs := append([]string{"build"}, args...) + cmdArgs = append(cmdArgs, "-o", executable, packagePath) + + build := exec.Command("go", cmdArgs...) + build.Env = replaceGoPath(os.Environ(), gopath) + build.Env = append(build.Env, env...) + + output, err := build.CombinedOutput() + if err != nil { + return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output)) + } + + return executable, nil +} + +/* +You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by +gexec. In Ginkgo this is typically done in an AfterSuite callback. +*/ +func CleanupBuildArtifacts() { + mu.Lock() + defer mu.Unlock() + if tmpDir != "" { + os.RemoveAll(tmpDir) + tmpDir = "" + } +} + +func temporaryDirectory() (string, error) { + var err error + mu.Lock() + defer mu.Unlock() + if tmpDir == "" { + tmpDir, err = ioutil.TempDir("", "gexec_artifacts") + if err != nil { + return "", err + } + } + + return ioutil.TempDir(tmpDir, "g") +} diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go new file mode 100644 index 000000000..98a354937 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go @@ -0,0 +1,86 @@ +package gexec + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +/* +The Exit matcher operates on a session: + + Expect(session).Should(Exit()) + +Exit passes if the session has already exited. + +If no status code is provided, then Exit will succeed if the session has exited regardless of exit code. +Otherwise, Exit will only succeed if the process has exited with the provided status code. + +Note that the process must have already exited. 
To wait for a process to exit, use Eventually: + + Eventually(session, 3).Should(Exit(0)) +*/ +func Exit(optionalExitCode ...int) *exitMatcher { + exitCode := -1 + if len(optionalExitCode) > 0 { + exitCode = optionalExitCode[0] + } + + return &exitMatcher{ + exitCode: exitCode, + } +} + +type exitMatcher struct { + exitCode int + didExit bool + actualExitCode int +} + +type Exiter interface { + ExitCode() int +} + +func (m *exitMatcher) Match(actual interface{}) (success bool, err error) { + exiter, ok := actual.(Exiter) + if !ok { + return false, fmt.Errorf("Exit must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1)) + } + + m.actualExitCode = exiter.ExitCode() + + if m.actualExitCode == -1 { + return false, nil + } + + if m.exitCode == -1 { + return true, nil + } + return m.exitCode == m.actualExitCode, nil +} + +func (m *exitMatcher) FailureMessage(actual interface{}) (message string) { + if m.actualExitCode == -1 { + return "Expected process to exit. It did not." + } + return format.Message(m.actualExitCode, "to match exit code:", m.exitCode) +} + +func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) { + if m.actualExitCode == -1 { + return "you really shouldn't be able to see this!" + } else { + if m.exitCode == -1 { + return "Expected process not to exit. It did." + } + return format.Message(m.actualExitCode, "not to match exit code:", m.exitCode) + } +} + +func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + session, ok := actual.(*Session) + if ok { + return session.ExitCode() == -1 + } + return true +} diff --git a/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go new file mode 100644 index 000000000..05e695abc --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go @@ -0,0 +1,53 @@ +package gexec + +import ( + "io" + "sync" +) + +/* +PrefixedWriter wraps an io.Writer, emiting the passed in prefix at the beginning of each new line. +This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each +session by passing in a PrefixedWriter: + +gexec.Start(cmd, NewPrefixedWriter("[my-cmd] ", GinkgoWriter), NewPrefixedWriter("[my-cmd] ", GinkgoWriter)) +*/ +type PrefixedWriter struct { + prefix []byte + writer io.Writer + lock *sync.Mutex + atStartOfLine bool +} + +func NewPrefixedWriter(prefix string, writer io.Writer) *PrefixedWriter { + return &PrefixedWriter{ + prefix: []byte(prefix), + writer: writer, + lock: &sync.Mutex{}, + atStartOfLine: true, + } +} + +func (w *PrefixedWriter) Write(b []byte) (int, error) { + w.lock.Lock() + defer w.lock.Unlock() + + toWrite := []byte{} + + for _, c := range b { + if w.atStartOfLine { + toWrite = append(toWrite, w.prefix...) + } + + toWrite = append(toWrite, c) + + w.atStartOfLine = c == '\n' + } + + _, err := w.writer.Write(toWrite) + if err != nil { + return 0, err + } + + return len(b), nil +} diff --git a/vendor/github.com/onsi/gomega/gexec/session.go b/vendor/github.com/onsi/gomega/gexec/session.go new file mode 100644 index 000000000..5cb00ca65 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gexec/session.go @@ -0,0 +1,303 @@ +/* +Package gexec provides support for testing external processes. +*/ +package gexec + +import ( + "io" + "os" + "os/exec" + "sync" + "syscall" + + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gbytes" +) + +const INVALID_EXIT_CODE = 254 + +type Session struct { + //The wrapped command + Command *exec.Cmd + + //A *gbytes.Buffer connected to the command's stdout + Out *gbytes.Buffer + + //A *gbytes.Buffer connected to the command's stderr + Err *gbytes.Buffer + + //A channel that will close when the command exits + Exited <-chan struct{} + + lock *sync.Mutex + exitCode int +} + +/* +Start starts the passed-in *exec.Cmd command. It wraps the command in a *gexec.Session. + +The session pipes the command's stdout and stderr to two *gbytes.Buffers available as properties on the session: session.Out and session.Err. +These buffers can be used with the gbytes.Say matcher to match against unread output: + + Expect(session.Out).Should(gbytes.Say("foo-out")) + Expect(session.Err).Should(gbytes.Say("foo-err")) + +In addition, Session satisfies the gbytes.BufferProvider interface and provides the stdout *gbytes.Buffer. This allows you to replace the first line, above, with: + + Expect(session).Should(gbytes.Say("foo-out")) + +When outWriter and/or errWriter are non-nil, the session will pipe stdout and/or stderr output both into the session *gybtes.Buffers and to the passed-in outWriter/errWriter. +This is useful for capturing the process's output or logging it to screen. In particular, when using Ginkgo it can be convenient to direct output to the GinkgoWriter: + + session, err := Start(command, GinkgoWriter, GinkgoWriter) + +This will log output when running tests in verbose mode, but - otherwise - will only log output when a test fails. + +The session wrapper is responsible for waiting on the *exec.Cmd command. You *should not* call command.Wait() yourself. +Instead, to assert that the command has exited you can use the gexec.Exit matcher: + + Expect(session).Should(gexec.Exit()) + +When the session exits it closes the stdout and stderr gbytes buffers. This will short circuit any +Eventuallys waiting for the buffers to Say something. +*/ +func Start(command *exec.Cmd, outWriter io.Writer, errWriter io.Writer) (*Session, error) { + exited := make(chan struct{}) + + session := &Session{ + Command: command, + Out: gbytes.NewBuffer(), + Err: gbytes.NewBuffer(), + Exited: exited, + lock: &sync.Mutex{}, + exitCode: -1, + } + + var commandOut, commandErr io.Writer + + commandOut, commandErr = session.Out, session.Err + + if outWriter != nil { + commandOut = io.MultiWriter(commandOut, outWriter) + } + + if errWriter != nil { + commandErr = io.MultiWriter(commandErr, errWriter) + } + + command.Stdout = commandOut + command.Stderr = commandErr + + err := command.Start() + if err == nil { + go session.monitorForExit(exited) + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + trackedSessions = append(trackedSessions, session) + } + + return session, err +} + +/* +Buffer implements the gbytes.BufferProvider interface and returns s.Out +This allows you to make gbytes.Say matcher assertions against stdout without having to reference .Out: + + Eventually(session).Should(gbytes.Say("foo")) +*/ +func (s *Session) Buffer() *gbytes.Buffer { + return s.Out +} + +/* +ExitCode returns the wrapped command's exit code. If the command hasn't exited yet, ExitCode returns -1. 
+ +To assert that the command has exited it is more convenient to use the Exit matcher: + + Eventually(s).Should(gexec.Exit()) + +When the process exits because it has received a particular signal, the exit code will be 128+signal-value +(See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html) + +*/ +func (s *Session) ExitCode() int { + s.lock.Lock() + defer s.lock.Unlock() + return s.exitCode +} + +/* +Wait waits until the wrapped command exits. It can be passed an optional timeout. +If the command does not exit within the timeout, Wait will trigger a test failure. + +Wait returns the session, making it possible to chain: + + session.Wait().Out.Contents() + +will wait for the command to exit then return the entirety of Out's contents. + +Wait uses eventually under the hood and accepts the same timeout/polling intervals that eventually does. +*/ +func (s *Session) Wait(timeout ...interface{}) *Session { + EventuallyWithOffset(1, s, timeout...).Should(Exit()) + return s +} + +/* +Kill sends the running command a SIGKILL signal. It does not wait for the process to exit. + +If the command has already exited, Kill returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Kill() *Session { + return s.Signal(syscall.SIGKILL) +} + +/* +Interrupt sends the running command a SIGINT signal. It does not wait for the process to exit. + +If the command has already exited, Interrupt returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Interrupt() *Session { + return s.Signal(syscall.SIGINT) +} + +/* +Terminate sends the running command a SIGTERM signal. It does not wait for the process to exit. + +If the command has already exited, Terminate returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Terminate() *Session { + return s.Signal(syscall.SIGTERM) +} + +/* +Signal sends the running command the passed in signal. It does not wait for the process to exit. + +If the command has already exited, Signal returns silently. + +The session is returned to enable chaining. +*/ +func (s *Session) Signal(signal os.Signal) *Session { + if s.processIsAlive() { + s.Command.Process.Signal(signal) + } + return s +} + +func (s *Session) monitorForExit(exited chan<- struct{}) { + err := s.Command.Wait() + s.lock.Lock() + s.Out.Close() + s.Err.Close() + status := s.Command.ProcessState.Sys().(syscall.WaitStatus) + if status.Signaled() { + s.exitCode = 128 + int(status.Signal()) + } else { + exitStatus := status.ExitStatus() + if exitStatus == -1 && err != nil { + s.exitCode = INVALID_EXIT_CODE + } + s.exitCode = exitStatus + } + s.lock.Unlock() + + close(exited) +} + +func (s *Session) processIsAlive() bool { + return s.ExitCode() == -1 && s.Command.Process != nil +} + +var trackedSessions = []*Session{} +var trackedSessionsMutex = &sync.Mutex{} + +/* +Kill sends a SIGKILL signal to all the processes started by Run, and waits for them to exit. +The timeout specified is applied to each process killed. + +If any of the processes already exited, KillAndWait returns silently. +*/ +func KillAndWait(timeout ...interface{}) { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Kill().Wait(timeout...) + } + trackedSessions = []*Session{} +} + +/* +Kill sends a SIGTERM signal to all the processes started by Run, and waits for them to exit. +The timeout specified is applied to each process killed. 
+ +If any of the processes already exited, TerminateAndWait returns silently. +*/ +func TerminateAndWait(timeout ...interface{}) { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Terminate().Wait(timeout...) + } +} + +/* +Kill sends a SIGKILL signal to all the processes started by Run. +It does not wait for the processes to exit. + +If any of the processes already exited, Kill returns silently. +*/ +func Kill() { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Kill() + } +} + +/* +Terminate sends a SIGTERM signal to all the processes started by Run. +It does not wait for the processes to exit. + +If any of the processes already exited, Terminate returns silently. +*/ +func Terminate() { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Terminate() + } +} + +/* +Signal sends the passed in signal to all the processes started by Run. +It does not wait for the processes to exit. + +If any of the processes already exited, Signal returns silently. +*/ +func Signal(signal os.Signal) { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Signal(signal) + } +} + +/* +Interrupt sends the SIGINT signal to all the processes started by Run. +It does not wait for the processes to exit. + +If any of the processes already exited, Interrupt returns silently. +*/ +func Interrupt() { + trackedSessionsMutex.Lock() + defer trackedSessionsMutex.Unlock() + for _, session := range trackedSessions { + session.Interrupt() + } +} diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go new file mode 100644 index 000000000..448d595da --- /dev/null +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -0,0 +1,429 @@ +/* +Gomega is the Ginkgo BDD-style testing framework's preferred matcher library. + +The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/ + +Gomega on Github: http://github.com/onsi/gomega + +Learn more about Ginkgo online: http://onsi.github.io/ginkgo + +Ginkgo on Github: http://github.com/onsi/ginkgo + +Gomega is MIT-Licensed +*/ +package gomega + +import ( + "fmt" + "reflect" + "time" + + "github.com/onsi/gomega/internal/assertion" + "github.com/onsi/gomega/internal/asyncassertion" + "github.com/onsi/gomega/internal/testingtsupport" + "github.com/onsi/gomega/types" +) + +const GOMEGA_VERSION = "1.5.0" + +const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. +If you're using Ginkgo then you probably forgot to put your assertion in an It(). +Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT(). +Depending on your vendoring solution you may be inadvertently importing gomega and subpackages (e.g. ghhtp, gexec,...) from different locations. +` + +var globalFailWrapper *types.GomegaFailWrapper + +var defaultEventuallyTimeout = time.Second +var defaultEventuallyPollingInterval = 10 * time.Millisecond +var defaultConsistentlyDuration = 100 * time.Millisecond +var defaultConsistentlyPollingInterval = 10 * time.Millisecond + +// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails +// the fail handler passed into RegisterFailHandler is called. 
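+//
+// In a Ginkgo suite this is typically a single call in the suite bootstrap,
+// e.g. (sketch assuming the usual dot-import of the ginkgo package):
+//
+//	RegisterFailHandler(Fail)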
+func RegisterFailHandler(handler types.GomegaFailHandler) { + RegisterFailHandlerWithT(testingtsupport.EmptyTWithHelper{}, handler) +} + +// RegisterFailHandlerWithT ensures that the given types.TWithHelper and fail handler +// are used globally. +func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandler) { + if handler == nil { + globalFailWrapper = nil + return + } + + globalFailWrapper = &types.GomegaFailWrapper{ + Fail: handler, + TWithHelper: t, + } +} + +// RegisterTestingT connects Gomega to Golang's XUnit style +// Testing.T tests. It is now deprecated and you should use NewWithT() instead. +// +// Legacy Documentation: +// +// You'll need to call this at the top of each XUnit style test: +// +// func TestFarmHasCow(t *testing.T) { +// RegisterTestingT(t) +// +// f := farm.New([]string{"Cow", "Horse"}) +// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } +// +// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to +// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests +// in parallel as the global fail handler cannot point to more than one testing.T at a time. +// +// NewWithT() does not have this limitation +// +// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*). +func RegisterTestingT(t types.GomegaTestingT) { + tWithHelper, hasHelper := t.(types.TWithHelper) + if !hasHelper { + RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail) + return + } + RegisterFailHandlerWithT(tWithHelper, testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail) +} + +// InterceptGomegaFailures runs a given callback and returns an array of +// failure messages generated by any Gomega assertions within the callback. +// +// This is accomplished by temporarily replacing the *global* fail handler +// with a fail handler that simply annotates failures. The original fail handler +// is reset when InterceptGomegaFailures returns. +// +// This is most useful when testing custom matchers, but can also be used to check +// on a value using a Gomega assertion without causing a test failure. +func InterceptGomegaFailures(f func()) []string { + originalHandler := globalFailWrapper.Fail + failures := []string{} + RegisterFailHandler(func(message string, callerSkip ...int) { + failures = append(failures, message) + }) + f() + RegisterFailHandler(originalHandler) + return failures +} + +// Ω wraps an actual value allowing assertions to be made on it: +// Ω("foo").Should(Equal("foo")) +// +// If Ω is passed more than one argument it will pass the *first* argument to the matcher. +// All subsequent arguments will be required to be nil/zero. +// +// This is convenient if you want to make an assertion on a method/function that returns +// a value and an error - a common patter in Go. +// +// For example, given a function with signature: +// func MyAmazingThing() (int, error) +// +// Then: +// Ω(MyAmazingThing()).Should(Equal(3)) +// Will succeed only if `MyAmazingThing()` returns `(3, nil)` +// +// Ω and Expect are identical +func Ω(actual interface{}, extra ...interface{}) Assertion { + return ExpectWithOffset(0, actual, extra...) +} + +// Expect wraps an actual value allowing assertions to be made on it: +// Expect("foo").To(Equal("foo")) +// +// If Expect is passed more than one argument it will pass the *first* argument to the matcher. +// All subsequent arguments will be required to be nil/zero. 
+// +// This is convenient if you want to make an assertion on a method/function that returns +// a value and an error - a common patter in Go. +// +// For example, given a function with signature: +// func MyAmazingThing() (int, error) +// +// Then: +// Expect(MyAmazingThing()).Should(Equal(3)) +// Will succeed only if `MyAmazingThing()` returns `(3, nil)` +// +// Expect and Ω are identical +func Expect(actual interface{}, extra ...interface{}) Assertion { + return ExpectWithOffset(0, actual, extra...) +} + +// ExpectWithOffset wraps an actual value allowing assertions to be made on it: +// ExpectWithOffset(1, "foo").To(Equal("foo")) +// +// Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument +// this is used to modify the call-stack offset when computing line numbers. +// +// This is most useful in helper functions that make assertions. If you want Gomega's +// error message to refer to the calling line in the test (as opposed to the line in the helper function) +// set the first argument of `ExpectWithOffset` appropriately. +func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion { + if globalFailWrapper == nil { + panic(nilFailHandlerPanic) + } + return assertion.New(actual, globalFailWrapper, offset, extra...) +} + +// Eventually wraps an actual value allowing assertions to be made on it. +// The assertion is tried periodically until it passes or a timeout occurs. +// +// Both the timeout and polling interval are configurable as optional arguments: +// The first optional argument is the timeout +// The second optional argument is the polling interval +// +// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +// last case they are interpreted as seconds. +// +// If Eventually is passed an actual that is a function taking no arguments and returning at least one value, +// then Eventually will call the function periodically and try the matcher against the function's first return value. +// +// Example: +// +// Eventually(func() int { +// return thingImPolling.Count() +// }).Should(BeNumerically(">=", 17)) +// +// Note that this example could be rewritten: +// +// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17)) +// +// If the function returns more than one value, then Eventually will pass the first value to the matcher and +// assert that all other values are nil/zero. +// This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. +// +// For example, consider a method that returns a value and an error: +// func FetchFromDB() (string, error) +// +// Then +// Eventually(FetchFromDB).Should(Equal("hasselhoff")) +// +// Will pass only if the the returned error is nil and the returned string passes the matcher. +// +// Eventually's default timeout is 1 second, and its default polling interval is 10ms +func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { + return EventuallyWithOffset(0, actual, intervals...) +} + +// EventuallyWithOffset operates like Eventually but takes an additional +// initial argument to indicate an offset in the call stack. This is useful when building helper +// functions that contain matchers. To learn more, read about `ExpectWithOffset`. 
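+//
+// A sketch of a helper built on it (expectEventuallyReady and isReady are
+// hypothetical names used only for illustration; the offset of 1 makes a
+// failure point at the helper's caller):
+//
+//	func expectEventuallyReady() {
+//		EventuallyWithOffset(1, isReady).Should(BeTrue())
+//	}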
+func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { + if globalFailWrapper == nil { + panic(nilFailHandlerPanic) + } + timeoutInterval := defaultEventuallyTimeout + pollingInterval := defaultEventuallyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset) +} + +// Consistently wraps an actual value allowing assertions to be made on it. +// The assertion is tried periodically and is required to pass for a period of time. +// +// Both the total time and polling interval are configurable as optional arguments: +// The first optional argument is the duration that Consistently will run for +// The second optional argument is the polling interval +// +// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the +// last case they are interpreted as seconds. +// +// If Consistently is passed an actual that is a function taking no arguments and returning at least one value, +// then Consistently will call the function periodically and try the matcher against the function's first return value. +// +// If the function returns more than one value, then Consistently will pass the first value to the matcher and +// assert that all other values are nil/zero. +// This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go. +// +// Consistently is useful in cases where you want to assert that something *does not happen* over a period of tiem. +// For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could: +// +// Consistently(channel).ShouldNot(Receive()) +// +// Consistently's default duration is 100ms, and its default polling interval is 10ms +func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { + return ConsistentlyWithOffset(0, actual, intervals...) +} + +// ConsistentlyWithOffset operates like Consistnetly but takes an additional +// initial argument to indicate an offset in the call stack. This is useful when building helper +// functions that contain matchers. To learn more, read about `ExpectWithOffset`. +func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { + if globalFailWrapper == nil { + panic(nilFailHandlerPanic) + } + timeoutInterval := defaultConsistentlyDuration + pollingInterval := defaultConsistentlyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset) +} + +// SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. +func SetDefaultEventuallyTimeout(t time.Duration) { + defaultEventuallyTimeout = t +} + +// SetDefaultEventuallyPollingInterval sets the default polling interval for Eventually. +func SetDefaultEventuallyPollingInterval(t time.Duration) { + defaultEventuallyPollingInterval = t +} + +// SetDefaultConsistentlyDuration sets the default duration for Consistently. 
Consistently will verify that your condition is satsified for this long. +func SetDefaultConsistentlyDuration(t time.Duration) { + defaultConsistentlyDuration = t +} + +// SetDefaultConsistentlyPollingInterval sets the default polling interval for Consistently. +func SetDefaultConsistentlyPollingInterval(t time.Duration) { + defaultConsistentlyPollingInterval = t +} + +// AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against +// the matcher passed to the Should and ShouldNot methods. +// +// Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to +// fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more +// descriptive. +// +// Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed. +// +// Example: +// +// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") +// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.") +type AsyncAssertion interface { + Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool +} + +// GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter. +type GomegaAsyncAssertion = AsyncAssertion + +// Assertion is returned by Ω and Expect and compares the actual value to the matcher +// passed to the Should/ShouldNot and To/ToNot/NotTo methods. +// +// Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect +// though this is not enforced. +// +// All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf() +// and is used to annotate failure messages. +// +// All methods return a bool that is true if hte assertion passed and false if it failed. +// +// Example: +// +// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) +type Assertion interface { + Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + + To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool + NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool +} + +// GomegaAssertion is deprecated in favor of Assertion, which does not stutter. +type GomegaAssertion = Assertion + +// OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it +type OmegaMatcher types.GomegaMatcher + +// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage +// Gomega's rich ecosystem of matchers in standard `testing` test suites. +// +// Use `NewWithT` to instantiate a `WithT` +type WithT struct { + t types.GomegaTestingT +} + +// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter. +type GomegaWithT = WithT + +// NewWithT takes a *testing.T and returngs a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with +// Gomega's rich ecosystem of matchers in standard `testing` test suits. 
+// +// func TestFarmHasCow(t *testing.T) { +// g := gomega.NewWithT(t) +// +// f := farm.New([]string{"Cow", "Horse"}) +// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } +func NewWithT(t types.GomegaTestingT) *WithT { + return &WithT{ + t: t, + } +} + +// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter. +func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT { + return NewWithT(t) +} + +// Expect is used to make assertions. See documentation for Expect. +func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion { + return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), 0, extra...) +} + +// Eventually is used to make asynchronous assertions. See documentation for Eventually. +func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { + timeoutInterval := defaultEventuallyTimeout + pollingInterval := defaultEventuallyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0) +} + +// Consistently is used to make asynchronous assertions. See documentation for Consistently. +func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { + timeoutInterval := defaultConsistentlyDuration + pollingInterval := defaultConsistentlyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0) +} + +func toDuration(input interface{}) time.Duration { + duration, ok := input.(time.Duration) + if ok { + return duration + } + + value := reflect.ValueOf(input) + kind := reflect.TypeOf(input).Kind() + + if reflect.Int <= kind && kind <= reflect.Int64 { + return time.Duration(value.Int()) * time.Second + } else if reflect.Uint <= kind && kind <= reflect.Uint64 { + return time.Duration(value.Uint()) * time.Second + } else if reflect.Float32 <= kind && kind <= reflect.Float64 { + return time.Duration(value.Float() * float64(time.Second)) + } else if reflect.String == kind { + duration, err := time.ParseDuration(value.String()) + if err != nil { + panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) + } + return duration + } + + panic(fmt.Sprintf("%v is not a valid interval. 
Must be time.Duration, parsable duration string or a number.", input)) +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go new file mode 100644 index 000000000..00197b67a --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go @@ -0,0 +1,105 @@ +package assertion + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/types" +) + +type Assertion struct { + actualInput interface{} + failWrapper *types.GomegaFailWrapper + offset int + extra []interface{} +} + +func New(actualInput interface{}, failWrapper *types.GomegaFailWrapper, offset int, extra ...interface{}) *Assertion { + return &Assertion{ + actualInput: actualInput, + failWrapper: failWrapper, + offset: offset, + extra: extra, + } +} + +func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" + } +} + +func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + matches, err := matcher.Match(assertion.actualInput) + description := assertion.buildDescription(optionalDescription...) + assertion.failWrapper.TWithHelper.Helper() + if err != nil { + assertion.failWrapper.Fail(description+err.Error(), 2+assertion.offset) + return false + } + if matches != desiredMatch { + var message string + if desiredMatch { + message = matcher.FailureMessage(assertion.actualInput) + } else { + message = matcher.NegatedFailureMessage(assertion.actualInput) + } + assertion.failWrapper.Fail(description+message, 2+assertion.offset) + return false + } + + return true +} + +func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool { + success, message := vetExtras(assertion.extra) + if success { + return true + } + + description := assertion.buildDescription(optionalDescription...) 
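+ // A non-nil/non-zero extra value was passed alongside the actual value:
+ // mark this frame as a test helper and report the failure at the caller's offset.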
+ assertion.failWrapper.TWithHelper.Helper() + assertion.failWrapper.Fail(description+message, 2+assertion.offset) + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go new file mode 100644 index 000000000..cdab233eb --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go @@ -0,0 +1,194 @@ +package asyncassertion + +import ( + "errors" + "fmt" + "reflect" + "time" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AsyncAssertionType uint + +const ( + AsyncAssertionTypeEventually AsyncAssertionType = iota + AsyncAssertionTypeConsistently +) + +type AsyncAssertion struct { + asyncType AsyncAssertionType + actualInput interface{} + timeoutInterval time.Duration + pollingInterval time.Duration + failWrapper *types.GomegaFailWrapper + offset int +} + +func New(asyncType AsyncAssertionType, actualInput interface{}, failWrapper *types.GomegaFailWrapper, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { + actualType := reflect.TypeOf(actualInput) + if actualType.Kind() == reflect.Func { + if actualType.NumIn() != 0 || actualType.NumOut() == 0 { + panic("Expected a function with no arguments and one or more return values.") + } + } + + return &AsyncAssertion{ + asyncType: asyncType, + actualInput: actualInput, + failWrapper: failWrapper, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + offset: offset, + } +} + +func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.failWrapper.TWithHelper.Helper() + return assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + default: + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) 
+ "\n" + } +} + +func (assertion *AsyncAssertion) actualInputIsAFunction() bool { + actualType := reflect.TypeOf(assertion.actualInput) + return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0 +} + +func (assertion *AsyncAssertion) pollActual() (interface{}, error) { + if assertion.actualInputIsAFunction() { + values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{}) + + extras := []interface{}{} + for _, value := range values[1:] { + extras = append(extras, value.Interface()) + } + + success, message := vetExtras(extras) + + if !success { + return nil, errors.New(message) + } + + return values[0].Interface(), nil + } + + return assertion.actualInput, nil +} + +func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualInputIsAFunction() { + return true + } + + return oraclematcher.MatchMayChangeInTheFuture(matcher, value) +} + +func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + timer := time.Now() + timeout := time.After(assertion.timeoutInterval) + + description := assertion.buildDescription(optionalDescription...) + + var matches bool + var err error + mayChange := true + value, err := assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + + assertion.failWrapper.TWithHelper.Helper() + + fail := func(preamble string) { + errMsg := "" + message := "" + if err != nil { + errMsg = "Error: " + err.Error() + } else { + if desiredMatch { + message = matcher.FailureMessage(value) + } else { + message = matcher.NegatedFailureMessage(value) + } + } + assertion.failWrapper.TWithHelper.Helper() + assertion.failWrapper.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) + } + + if assertion.asyncType == AsyncAssertionTypeEventually { + for { + if err == nil && matches == desiredMatch { + return true + } + + if !mayChange { + fail("No future change is possible. 
Bailing out early") + return false + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + fail("Timed out") + return false + } + } + } else if assertion.asyncType == AsyncAssertionTypeConsistently { + for { + if !(err == nil && matches == desiredMatch) { + fail("Failed") + return false + } + + if !mayChange { + return true + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + return true + } + } + } + + return false +} + +func vetExtras(extras []interface{}) (bool, string) { + for i, extra := range extras { + if extra != nil { + zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() + if !reflect.DeepEqual(zeroValue, extra) { + message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go new file mode 100644 index 000000000..66cad88a1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go @@ -0,0 +1,25 @@ +package oraclematcher + +import "github.com/onsi/gomega/types" + +/* +GomegaMatchers that also match the OracleMatcher interface can convey information about +whether or not their result will change upon future attempts. + +This allows `Eventually` and `Consistently` to short circuit if success becomes impossible. + +For example, a process' exit code can never change. So, gexec's Exit matcher returns `true` +for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. 
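Note: illustrative usage only, not part of this diff. Because pollActual above re-invokes a zero-argument function on every poll and treats any non-nil trailing return value as a failure, Eventually can poll a (value, error) function directly. lookupPodPhase is a hypothetical stub; the test scaffolding matches the earlier sketch.

    // lookupPodPhase is a hypothetical stub for an API call being polled.
    func lookupPodPhase(name string) (string, error) { return "Running", nil }

    func TestEventuallyPollsFunction(t *testing.T) {
        g := gomega.NewWithT(t)

        // The function is re-evaluated on each poll; a non-nil error return
        // fails the assertion instead of being matched.
        g.Eventually(func() (string, error) {
            return lookupPodPhase("demo-pod")
        }, "10s", "250ms").Should(gomega.Equal("Running"))
    }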
+*/ +type OracleMatcher interface { + MatchMayChangeInTheFuture(actual interface{}) bool +} + +func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool { + oracleMatcher, ok := matcher.(OracleMatcher) + if !ok { + return true + } + + return oracleMatcher.MatchMayChangeInTheFuture(value) +} diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go new file mode 100644 index 000000000..bb27032f6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go @@ -0,0 +1,60 @@ +package testingtsupport + +import ( + "regexp" + "runtime/debug" + "strings" + + "github.com/onsi/gomega/types" +) + +var StackTracePruneRE = regexp.MustCompile(`\/gomega\/|\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + +type EmptyTWithHelper struct{} + +func (e EmptyTWithHelper) Helper() {} + +type gomegaTestingT interface { + Fatalf(format string, args ...interface{}) +} + +func BuildTestingTGomegaFailWrapper(t gomegaTestingT) *types.GomegaFailWrapper { + tWithHelper, hasHelper := t.(types.TWithHelper) + if !hasHelper { + tWithHelper = EmptyTWithHelper{} + } + + fail := func(message string, callerSkip ...int) { + if hasHelper { + tWithHelper.Helper() + t.Fatalf("\n%s", message) + } else { + skip := 2 + if len(callerSkip) > 0 { + skip += callerSkip[0] + } + stackTrace := pruneStack(string(debug.Stack()), skip) + t.Fatalf("\n%s\n%s\n", stackTrace, message) + } + } + + return &types.GomegaFailWrapper{ + Fail: fail, + TWithHelper: tWithHelper, + } +} + +func pruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n")[1:] + if len(stack) > 2*skip { + stack = stack[2*skip:] + } + prunedStack := []string{} + for i := 0; i < len(stack)/2; i++ { + if !StackTracePruneRE.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go new file mode 100644 index 000000000..c3a326dd4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -0,0 +1,427 @@ +package gomega + +import ( + "time" + + "github.com/onsi/gomega/matchers" + "github.com/onsi/gomega/types" +) + +//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about +//types when performing comparisons. +//It is an error for both actual and expected to be nil. Use BeNil() instead. +func Equal(expected interface{}) types.GomegaMatcher { + return &matchers.EqualMatcher{ + Expected: expected, + } +} + +//BeEquivalentTo is more lax than Equal, allowing equality between different types. +//This is done by converting actual to have the type of expected before +//attempting equality with reflect.DeepEqual. +//It is an error for actual and expected to be nil. Use BeNil() instead. +func BeEquivalentTo(expected interface{}) types.GomegaMatcher { + return &matchers.BeEquivalentToMatcher{ + Expected: expected, + } +} + +//BeIdenticalTo uses the == operator to compare actual with expected. +//BeIdenticalTo is strict about types when performing comparisons. +//It is an error for both actual and expected to be nil. Use BeNil() instead. 
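Note: a minimal sketch, not part of the vendored code, of a custom matcher that also implements the OracleMatcher interface defined above, so Eventually and Consistently can stop polling once the outcome is final (the same idea as the gexec Exit example in the comment). The process type, its Exited method, and the fmt import are assumptions made for the example.

    // process is a hypothetical stand-in for something whose exit state is final.
    type process struct{ done bool }

    func (p *process) Exited() bool { return p.done }

    type exitedMatcher struct{}

    func (exitedMatcher) Match(actual interface{}) (bool, error) {
        p, ok := actual.(*process)
        if !ok {
            return false, fmt.Errorf("exitedMatcher expects a *process, got %T", actual)
        }
        return p.Exited(), nil
    }

    func (exitedMatcher) FailureMessage(actual interface{}) string {
        return "Expected process to have exited"
    }

    func (exitedMatcher) NegatedFailureMessage(actual interface{}) string {
        return "Expected process not to have exited"
    }

    // Once the process has exited the answer can no longer change, so
    // Eventually/Consistently may bail out early, e.g. g.Eventually(p).Should(exitedMatcher{}).
    func (exitedMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
        p, ok := actual.(*process)
        return ok && !p.Exited()
    }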
+func BeIdenticalTo(expected interface{}) types.GomegaMatcher { + return &matchers.BeIdenticalToMatcher{ + Expected: expected, + } +} + +//BeNil succeeds if actual is nil +func BeNil() types.GomegaMatcher { + return &matchers.BeNilMatcher{} +} + +//BeTrue succeeds if actual is true +func BeTrue() types.GomegaMatcher { + return &matchers.BeTrueMatcher{} +} + +//BeFalse succeeds if actual is false +func BeFalse() types.GomegaMatcher { + return &matchers.BeFalseMatcher{} +} + +//HaveOccurred succeeds if actual is a non-nil error +//The typical Go error checking pattern looks like: +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) +func HaveOccurred() types.GomegaMatcher { + return &matchers.HaveOccurredMatcher{} +} + +//Succeed passes if actual is a nil error +//Succeed is intended to be used with functions that return a single error value. Instead of +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) +// +//You can write: +// Expect(SomethingThatMightFail()).Should(Succeed()) +// +//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect +//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. +//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. +func Succeed() types.GomegaMatcher { + return &matchers.SucceedMatcher{} +} + +//MatchError succeeds if actual is a non-nil error that matches the passed in string/error. +// +//These are valid use-cases: +// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" +// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// +//It is an error for err to be nil or an object that does not implement the Error interface +func MatchError(expected interface{}) types.GomegaMatcher { + return &matchers.MatchErrorMatcher{ + Expected: expected, + } +} + +//BeClosed succeeds if actual is a closed channel. +//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil +// +//In order to check whether or not the channel is closed, Gomega must try to read from the channel +//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about +//values coming down the channel. +// +//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before +//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read). +// +//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed. +func BeClosed() types.GomegaMatcher { + return &matchers.BeClosedMatcher{} +} + +//Receive succeeds if there is a value to be received on actual. +//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error. +// +//Receive returns immediately and never blocks: +// +//- If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// +//- If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// +//- If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail. 
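Note: illustrative only, not part of the diff. The error-handling matchers documented above in their usual idioms; openConfig and writeConfig are hypothetical stubs, and the errors import plus the scaffolding from the first sketch are assumed.

    // Hypothetical stubs standing in for real calls under test.
    func openConfig(path string) (string, error) { return "", errors.New("config not found") }
    func writeConfig(path string) error          { return nil }

    func TestErrorMatchers(t *testing.T) {
        g := gomega.NewWithT(t)

        _, err := openConfig("missing.yaml")
        g.Expect(err).To(gomega.HaveOccurred())
        g.Expect(err).To(gomega.MatchError("config not found")) // compares err.Error()

        // Succeed is meant for calls that return a single error value.
        g.Expect(writeConfig("out.yaml")).To(gomega.Succeed())
    }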
+// +//If you have a go-routine running in the background that will write to channel `c` you can: +// Eventually(c).Should(Receive()) +// +//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`) +// +//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`: +// Consistently(c).ShouldNot(Receive()) +// +//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example: +// Expect(c).Should(Receive(Equal("foo"))) +// +//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel. +// +//Passing Receive a matcher is especially useful when paired with Eventually: +// +// Eventually(c).Should(Receive(ContainSubstring("bar"))) +// +//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. +// +//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: +// var myThing thing +// Eventually(thingChan).Should(Receive(&myThing)) +// Expect(myThing.Sprocket).Should(Equal("foo")) +// Expect(myThing.IsValid()).Should(BeTrue()) +func Receive(args ...interface{}) types.GomegaMatcher { + var arg interface{} + if len(args) > 0 { + arg = args[0] + } + + return &matchers.ReceiveMatcher{ + Arg: arg, + } +} + +//BeSent succeeds if a value can be sent to actual. +//Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error. +//In addition, actual must not be closed. +// +//BeSent never blocks: +// +//- If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately +//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout +//- If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately +// +//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with). +//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends. +func BeSent(arg interface{}) types.GomegaMatcher { + return &matchers.BeSentMatcher{ + Arg: arg, + } +} + +//MatchRegexp succeeds if actual is a string or stringer that matches the +//passed-in regexp. Optional arguments can be provided to construct a regexp +//via fmt.Sprintf(). +func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher { + return &matchers.MatchRegexpMatcher{ + Regexp: regexp, + Args: args, + } +} + +//ContainSubstring succeeds if actual is a string or stringer that contains the +//passed-in substring. Optional arguments can be provided to construct the substring +//via fmt.Sprintf(). +func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher { + return &matchers.ContainSubstringMatcher{ + Substr: substr, + Args: args, + } +} + +//HavePrefix succeeds if actual is a string or stringer that contains the +//passed-in string as a prefix. Optional arguments can be provided to construct +//via fmt.Sprintf(). 
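Note: illustrative only. The channel matchers above (Receive, BeSent) paired with Eventually, as the comments suggest; the channels and goroutines are made up for the example and the scaffolding matches the earlier sketches.

    func TestChannelMatchers(t *testing.T) {
        g := gomega.NewWithT(t)

        results := make(chan string, 1)
        go func() { results <- "FooBar" }()

        // Polls the channel until a value matching the inner matcher is received.
        g.Eventually(results).Should(gomega.Receive(gomega.ContainSubstring("Bar")))

        requests := make(chan int) // unbuffered
        go func() { <-requests }()

        // BeSent actually sends the value once the channel is ready to receive.
        g.Eventually(requests).Should(gomega.BeSent(42))
    }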
+func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher { + return &matchers.HavePrefixMatcher{ + Prefix: prefix, + Args: args, + } +} + +//HaveSuffix succeeds if actual is a string or stringer that contains the +//passed-in string as a suffix. Optional arguments can be provided to construct +//via fmt.Sprintf(). +func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher { + return &matchers.HaveSuffixMatcher{ + Suffix: suffix, + Args: args, + } +} + +//MatchJSON succeeds if actual is a string or stringer of JSON that matches +//the expected JSON. The JSONs are decoded and the resulting objects are compared via +//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. +func MatchJSON(json interface{}) types.GomegaMatcher { + return &matchers.MatchJSONMatcher{ + JSONToMatch: json, + } +} + +//MatchXML succeeds if actual is a string or stringer of XML that matches +//the expected XML. The XMLs are decoded and the resulting objects are compared via +//reflect.DeepEqual so things like whitespaces shouldn't matter. +func MatchXML(xml interface{}) types.GomegaMatcher { + return &matchers.MatchXMLMatcher{ + XMLToMatch: xml, + } +} + +//MatchYAML succeeds if actual is a string or stringer of YAML that matches +//the expected YAML. The YAML's are decoded and the resulting objects are compared via +//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. +func MatchYAML(yaml interface{}) types.GomegaMatcher { + return &matchers.MatchYAMLMatcher{ + YAMLToMatch: yaml, + } +} + +//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice. +func BeEmpty() types.GomegaMatcher { + return &matchers.BeEmptyMatcher{} +} + +//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice. +func HaveLen(count int) types.GomegaMatcher { + return &matchers.HaveLenMatcher{ + Count: count, + } +} + +//HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice. +func HaveCap(count int) types.GomegaMatcher { + return &matchers.HaveCapMatcher{ + Count: count, + } +} + +//BeZero succeeds if actual is the zero value for its type or if actual is nil. +func BeZero() types.GomegaMatcher { + return &matchers.BeZeroMatcher{} +} + +//ContainElement succeeds if actual contains the passed in element. +//By default ContainElement() uses Equal() to perform the match, however a +//matcher can be passed in instead: +// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar"))) +// +//Actual must be an array, slice or map. +//For maps, ContainElement searches through the map's values. +func ContainElement(element interface{}) types.GomegaMatcher { + return &matchers.ContainElementMatcher{ + Element: element, + } +} + +//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter. +//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: +// +// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo")) +// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo")) +// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo"))) +// +//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values. 
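Note: illustrative only. A few of the string, JSON, and collection matchers documented above in one place, using the same scaffolding as the earlier sketches.

    func TestCollectionMatchers(t *testing.T) {
        g := gomega.NewWithT(t)

        // Key order and whitespace do not matter for MatchJSON.
        g.Expect(`{"name":"demo","replicas":3}`).To(gomega.MatchJSON(`{"replicas": 3, "name": "demo"}`))
        g.Expect("kubesphere.yaml").To(gomega.HaveSuffix(".yaml"))
        g.Expect([]string{"Foo", "FooBar"}).To(gomega.HaveLen(2))
        g.Expect([]string{"Foo", "FooBar"}).To(gomega.ContainElement(gomega.HavePrefix("Foo")))
        g.Expect(map[string]int{}).To(gomega.BeEmpty())
    }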
+// +//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can provided that it +//is the only element passed in to ConsistOf: +// +// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"})) +// +//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule. +func ConsistOf(elements ...interface{}) types.GomegaMatcher { + return &matchers.ConsistOfMatcher{ + Elements: elements, + } +} + +//HaveKey succeeds if actual is a map with the passed in key. +//By default HaveKey uses Equal() to perform the match, however a +//matcher can be passed in instead: +// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`))) +func HaveKey(key interface{}) types.GomegaMatcher { + return &matchers.HaveKeyMatcher{ + Key: key, + } +} + +//HaveKeyWithValue succeeds if actual is a map with the passed in key and value. +//By default HaveKeyWithValue uses Equal() to perform the match, however a +//matcher can be passed in instead: +// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar")) +// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar")) +func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { + return &matchers.HaveKeyWithValueMatcher{ + Key: key, + Value: value, + } +} + +//BeNumerically performs numerical assertions in a type-agnostic way. +//Actual and expected should be numbers, though the specific type of +//number is irrelevant (float32, float64, uint8, etc...). +// +//There are six, self-explanatory, supported comparators: +// Expect(1.0).Should(BeNumerically("==", 1)) +// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01)) +// Expect(1.0).Should(BeNumerically(">", 0.9)) +// Expect(1.0).Should(BeNumerically(">=", 1.0)) +// Expect(1.0).Should(BeNumerically("<", 3)) +// Expect(1.0).Should(BeNumerically("<=", 1.0)) +func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher { + return &matchers.BeNumericallyMatcher{ + Comparator: comparator, + CompareTo: compareTo, + } +} + +//BeTemporally compares time.Time's like BeNumerically +//Actual and expected must be time.Time. The comparators are the same as for BeNumerically +// Expect(time.Now()).Should(BeTemporally(">", time.Time{})) +// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second)) +func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher { + return &matchers.BeTemporallyMatcher{ + Comparator: comparator, + CompareTo: compareTo, + Threshold: threshold, + } +} + +//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected. +//It will return an error when one of the values is nil. +// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values +// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type +// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type +// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) +func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher { + return &matchers.AssignableToTypeOfMatcher{ + Expected: expected, + } +} + +//Panic succeeds if actual is a function that, when invoked, panics. 
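Note: illustrative only. The ConsistOf single-slice expansion described above, plus the map and numeric matchers, with the same scaffolding as before.

    func TestConsistOfAndNumbers(t *testing.T) {
        g := gomega.NewWithT(t)

        expected := []string{"FooBar", "Foo"}
        // A lone slice argument is expanded element by element, since a
        // []string cannot be passed directly as ...interface{}.
        g.Expect([]string{"Foo", "FooBar"}).To(gomega.ConsistOf(expected))

        g.Expect(map[string]string{"Foo": "Bar"}).To(gomega.HaveKeyWithValue("Foo", "Bar"))
        g.Expect(1.001).To(gomega.BeNumerically("~", 1.0, 0.01))
    }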
+//Actual must be a function that takes no arguments and returns no results. +func Panic() types.GomegaMatcher { + return &matchers.PanicMatcher{} +} + +//BeAnExistingFile succeeds if a file exists. +//Actual must be a string representing the abs path to the file being checked. +func BeAnExistingFile() types.GomegaMatcher { + return &matchers.BeAnExistingFileMatcher{} +} + +//BeARegularFile succeeds if a file exists and is a regular file. +//Actual must be a string representing the abs path to the file being checked. +func BeARegularFile() types.GomegaMatcher { + return &matchers.BeARegularFileMatcher{} +} + +//BeADirectory succeeds if a file exists and is a directory. +//Actual must be a string representing the abs path to the file being checked. +func BeADirectory() types.GomegaMatcher { + return &matchers.BeADirectoryMatcher{} +} + +//And succeeds only if all of the given matchers succeed. +//The matchers are tried in order, and will fail-fast if one doesn't succeed. +// Expect("hi").To(And(HaveLen(2), Equal("hi")) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func And(ms ...types.GomegaMatcher) types.GomegaMatcher { + return &matchers.AndMatcher{Matchers: ms} +} + +//SatisfyAll is an alias for And(). +// Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi"))) +func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher { + return And(matchers...) +} + +//Or succeeds if any of the given matchers succeed. +//The matchers are tried in order and will return immediately upon the first successful match. +// Expect("hi").To(Or(HaveLen(3), HaveLen(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func Or(ms ...types.GomegaMatcher) types.GomegaMatcher { + return &matchers.OrMatcher{Matchers: ms} +} + +//SatisfyAny is an alias for Or(). +// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2)) +func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher { + return Or(matchers...) +} + +//Not negates the given matcher; it succeeds if the given matcher fails. +// Expect(1).To(Not(Equal(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +func Not(matcher types.GomegaMatcher) types.GomegaMatcher { + return &matchers.NotMatcher{Matcher: matcher} +} + +//WithTransform applies the `transform` to the actual value and matches it against `matcher`. +//The given transform must be a function of one parameter that returns one value. +// var plus1 = func(i int) int { return i + 1 } +// Expect(1).To(WithTransform(plus1, Equal(2)) +// +//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. 
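Note: illustrative only. Composing matchers with And, Or, Not, and WithTransform as described above, using the same scaffolding as the earlier sketches.

    func TestComposedMatchers(t *testing.T) {
        g := gomega.NewWithT(t)

        g.Expect("hi").To(gomega.And(gomega.HaveLen(2), gomega.Equal("hi")))
        g.Expect("hi").To(gomega.Or(gomega.HaveLen(3), gomega.HaveLen(2)))
        g.Expect(1).To(gomega.Not(gomega.Equal(2)))

        // The transform must take one argument and return one value.
        plus1 := func(i int) int { return i + 1 }
        g.Expect(1).To(gomega.WithTransform(plus1, gomega.Equal(2)))
    }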
+func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher { + return matchers.NewWithTransformMatcher(transform, matcher) +} diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go new file mode 100644 index 000000000..d83a29164 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/and.go @@ -0,0 +1,63 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type AndMatcher struct { + Matchers []types.GomegaMatcher + + // state + firstFailedMatcher types.GomegaMatcher +} + +func (m *AndMatcher) Match(actual interface{}) (success bool, err error) { + m.firstFailedMatcher = nil + for _, matcher := range m.Matchers { + success, err := matcher.Match(actual) + if !success || err != nil { + m.firstFailedMatcher = matcher + return false, err + } + } + return true, nil +} + +func (m *AndMatcher) FailureMessage(actual interface{}) (message string) { + return m.firstFailedMatcher.FailureMessage(actual) +} + +func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) { + // not the most beautiful list of matchers, but not bad either... + return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers)) +} + +func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + /* + Example with 3 matchers: A, B, C + + Match evaluates them: T, F, => F + So match is currently F, what should MatchMayChangeInTheFuture() return? + Seems like it only depends on B, since currently B MUST change to allow the result to become T + + Match eval: T, T, T => T + So match is currently T, what should MatchMayChangeInTheFuture() return? + Seems to depend on ANY of them being able to change to F. + */ + + if m.firstFailedMatcher == nil { + // so all matchers succeeded.. Any one of them changing would change the result. + for _, matcher := range m.Matchers { + if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { + return true + } + } + return false // none of were going to change + } + // one of the matchers failed.. it must be able to change in order to affect the result + return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual) +} diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go new file mode 100644 index 000000000..51f8be6ae --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go @@ -0,0 +1,35 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type AssignableToTypeOfMatcher struct { + Expected interface{} +} + +func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } else if matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare type to .\nBe explicit and use BeNil() instead. 
This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } else if actual == nil { + return false, nil + } + + actualType := reflect.TypeOf(actual) + expectedType := reflect.TypeOf(matcher.Expected) + + return actualType.AssignableTo(expectedType), nil +} + +func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected)) +} + +func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string { + return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected)) +} diff --git a/vendor/github.com/onsi/gomega/matchers/attributes_slice.go b/vendor/github.com/onsi/gomega/matchers/attributes_slice.go new file mode 100644 index 000000000..355b362f4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/attributes_slice.go @@ -0,0 +1,14 @@ +package matchers + +import ( + "encoding/xml" + "strings" +) + +type attributesSlice []xml.Attr + +func (attrs attributesSlice) Len() int { return len(attrs) } +func (attrs attributesSlice) Less(i, j int) bool { + return strings.Compare(attrs[i].Name.Local, attrs[j].Name.Local) == -1 +} +func (attrs attributesSlice) Swap(i, j int) { attrs[i], attrs[j] = attrs[j], attrs[i] } diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go new file mode 100644 index 000000000..7b6975e41 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notADirectoryError struct { + os.FileInfo +} + +func (t notADirectoryError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.Mode().IsRegular(): + return "file is a regular file" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeADirectoryMatcher struct { + expected interface{} + err error +} + +func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsDir() { + matcher.err = notADirectoryError{fileInfo} + return false, nil + } + return true, nil +} + +func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err)) +} + +func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a directory")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go new file mode 100644 index 000000000..e239131fb --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type notARegularFileError struct { + os.FileInfo +} + +func (t notARegularFileError) Error() string { + fileInfo := os.FileInfo(t) + switch { + case fileInfo.IsDir(): + return "file is a directory" + default: + return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String()) + } +} + +type BeARegularFileMatcher 
struct { + expected interface{} + err error +} + +func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path") + } + + fileInfo, err := os.Stat(actualFilename) + if err != nil { + matcher.err = err + return false, nil + } + + if !fileInfo.Mode().IsRegular() { + matcher.err = notARegularFileError{fileInfo} + return false, nil + } + return true, nil +} + +func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err)) +} + +func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not be a regular file")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go new file mode 100644 index 000000000..d42eba223 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go @@ -0,0 +1,38 @@ +package matchers + +import ( + "fmt" + "os" + + "github.com/onsi/gomega/format" +) + +type BeAnExistingFileMatcher struct { + expected interface{} +} + +func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) { + actualFilename, ok := actual.(string) + if !ok { + return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path") + } + + if _, err = os.Stat(actualFilename); err != nil { + switch { + case os.IsNotExist(err): + return false, nil + default: + return false, err + } + } + + return true, nil +} + +func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to exist")) +} + +func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to exist")) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go new file mode 100644 index 000000000..80c9c8bb1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go @@ -0,0 +1,46 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeClosedMatcher struct { +} + +func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open. 
Got:\n%s", format.Object(actual, 1)) + } + + winnerIndex, _, open := reflect.Select([]reflect.SelectCase{ + {Dir: reflect.SelectRecv, Chan: channelValue}, + {Dir: reflect.SelectDefault}, + }) + + var closed bool + if winnerIndex == 0 { + closed = !open + } else if winnerIndex == 1 { + closed = false + } + + return closed, nil +} + +func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be closed") +} + +func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be open") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go new file mode 100644 index 000000000..8b00311b0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -0,0 +1,27 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type BeEmptyMatcher struct { +} + +func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == 0, nil +} + +func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be empty") +} + +func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be empty") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go new file mode 100644 index 000000000..97ab20a4e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go @@ -0,0 +1,34 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeEquivalentToMatcher struct { + Expected interface{} +} + +func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Both actual and expected must not be nil.") + } + + convertedActual := actual + + if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) { + convertedActual = reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface() + } + + return reflect.DeepEqual(convertedActual, matcher.Expected), nil +} + +func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be equivalent to", matcher.Expected) +} + +func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be equivalent to", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go new file mode 100644 index 000000000..91d3b779e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go @@ -0,0 +1,26 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type BeFalseMatcher struct { +} + +func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) { + return false, fmt.Errorf("Expected a boolean. 
Got:\n%s", format.Object(actual, 1)) + } + + return actual == false, nil +} + +func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be false") +} + +func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be false") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go new file mode 100644 index 000000000..fdcda4d1f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go @@ -0,0 +1,37 @@ +package matchers + +import ( + "fmt" + "runtime" + + "github.com/onsi/gomega/format" +) + +type BeIdenticalToMatcher struct { + Expected interface{} +} + +func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, matchErr error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + success = false + matchErr = nil + } + } + }() + + return actual == matcher.Expected, nil +} + +func (matcher *BeIdenticalToMatcher) FailureMessage(actual interface{}) string { + return format.Message(actual, "to be identical to", matcher.Expected) +} + +func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual interface{}) string { + return format.Message(actual, "not to be identical to", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go new file mode 100644 index 000000000..7ee84fe1b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go @@ -0,0 +1,18 @@ +package matchers + +import "github.com/onsi/gomega/format" + +type BeNilMatcher struct { +} + +func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) { + return isNil(actual), nil +} + +func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be nil") +} + +func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be nil") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go new file mode 100644 index 000000000..9f4f77eec --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go @@ -0,0 +1,132 @@ +package matchers + +import ( + "fmt" + "math" + + "github.com/onsi/gomega/format" +) + +type BeNumericallyMatcher struct { + Comparator string + CompareTo []interface{} +} + +func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) { + return matcher.FormatFailureMessage(actual, false) +} + +func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return matcher.FormatFailureMessage(actual, true) +} + +func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, negated bool) (message string) { + if len(matcher.CompareTo) == 1 { + message = fmt.Sprintf("to be %s", matcher.Comparator) + } else { + message = fmt.Sprintf("to be within %v of %s", matcher.CompareTo[1], matcher.Comparator) + } + if negated { + message = "not " + message + } + return 
format.Message(actual, message, matcher.CompareTo[0]) +} + +func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) { + if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 { + return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1)) + } + if !isNumber(actual) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(actual, 1)) + } + if !isNumber(matcher.CompareTo[0]) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) { + return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + if isFloat(actual) || isFloat(matcher.CompareTo[0]) { + var secondOperand float64 = 1e-8 + if len(matcher.CompareTo) == 2 { + secondOperand = toFloat(matcher.CompareTo[1]) + } + success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand) + } else if isInteger(actual) { + var secondOperand int64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toInteger(matcher.CompareTo[1]) + } + success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand) + } else if isUnsignedInteger(actual) { + var secondOperand uint64 = 0 + if len(matcher.CompareTo) == 2 { + secondOperand = toUnsignedInteger(matcher.CompareTo[1]) + } + success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand) + } else { + return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1)) + } + + return success, nil +} + +func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) { + switch matcher.Comparator { + case "==", "~": + diff := actual - compareTo + return -threshold <= diff && diff <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) { + switch matcher.Comparator { + case "==", "~": + if actual < compareTo { + actual, compareTo = compareTo, actual + } + return actual-compareTo <= threshold + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} + +func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) { + switch matcher.Comparator { + case "~": + return math.Abs(actual-compareTo) <= threshold + case "==": + return (actual == compareTo) + case ">": + return (actual > compareTo) + case ">=": + return (actual >= compareTo) + case "<": + return (actual < compareTo) + case "<=": + return (actual <= compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go new file mode 100644 index 000000000..302dd1a0a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go @@ -0,0 +1,71 @@ 
+package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeSentMatcher struct { + Arg interface{} + channelClosed bool +} + +func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.RecvDir { + return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel. Got:\n%s", format.Object(actual, 1)) + } + + argType := reflect.TypeOf(matcher.Arg) + assignable := argType.AssignableTo(channelType.Elem()) + + if !assignable { + return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1)) + } + + argValue := reflect.ValueOf(matcher.Arg) + + defer func() { + if e := recover(); e != nil { + success = false + err = fmt.Errorf("Cannot send to a closed channel") + matcher.channelClosed = true + } + }() + + winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{ + {Dir: reflect.SelectSend, Chan: channelValue, Send: argValue}, + {Dir: reflect.SelectDefault}, + }) + + var didSend bool + if winnerIndex == 0 { + didSend = true + } + + return didSend, nil +} + +func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to send:", matcher.Arg) +} + +func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go new file mode 100644 index 000000000..cb7c038ef --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go @@ -0,0 +1,66 @@ +package matchers + +import ( + "fmt" + "time" + + "github.com/onsi/gomega/format" +) + +type BeTemporallyMatcher struct { + Comparator string + CompareTo time.Time + Threshold []time.Duration +} + +func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo) +} + +func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) { + // predicate to test for time.Time type + isTime := func(t interface{}) bool { + _, ok := t.(time.Time) + return ok + } + + if !isTime(actual) { + return false, fmt.Errorf("Expected a time.Time. 
Got:\n%s", format.Object(actual, 1)) + } + + switch matcher.Comparator { + case "==", "~", ">", ">=", "<", "<=": + default: + return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) + } + + var threshold = time.Millisecond + if len(matcher.Threshold) == 1 { + threshold = matcher.Threshold[0] + } + + return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil +} + +func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) { + switch matcher.Comparator { + case "==": + return actual.Equal(compareTo) + case "~": + diff := actual.Sub(compareTo) + return -threshold <= diff && diff <= threshold + case ">": + return actual.After(compareTo) + case ">=": + return !actual.Before(compareTo) + case "<": + return actual.Before(compareTo) + case "<=": + return !actual.After(compareTo) + } + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go new file mode 100644 index 000000000..ec57c5db4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go @@ -0,0 +1,26 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type BeTrueMatcher struct { +} + +func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { + if !isBool(actual) { + return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1)) + } + + return actual.(bool), nil +} + +func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be true") +} + +func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be true") +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go new file mode 100644 index 000000000..26196f168 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go @@ -0,0 +1,28 @@ +package matchers + +import ( + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeZeroMatcher struct { +} + +func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return true, nil + } + zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface() + + return reflect.DeepEqual(zeroValue, actual), nil + +} + +func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be zero-valued") +} + +func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be zero-valued") +} diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go new file mode 100644 index 000000000..7b0e08868 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -0,0 +1,80 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" +) + +type ConsistOfMatcher struct { + Elements []interface{} +} + +func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. 
Got:\n%s", format.Object(actual, 1)) + } + + elements := matcher.Elements + if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) { + elements = []interface{}{} + value := reflect.ValueOf(matcher.Elements[0]) + for i := 0; i < value.Len(); i++ { + elements = append(elements, value.Index(i).Interface()) + } + } + + matchers := []interface{}{} + for _, element := range elements { + matcher, isMatcher := element.(omegaMatcher) + if !isMatcher { + matcher = &EqualMatcher{Expected: element} + } + matchers = append(matchers, matcher) + } + + values := matcher.valuesOf(actual) + + if len(values) != len(matchers) { + return false, nil + } + + neighbours := func(v, m interface{}) (bool, error) { + match, err := m.(omegaMatcher).Match(v) + return match && err == nil, nil + } + + bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours) + if err != nil { + return false, err + } + + return len(bipartiteGraph.LargestMatching()) == len(values), nil +} + +func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} { + value := reflect.ValueOf(actual) + values := []interface{}{} + if isMap(actual) { + keys := value.MapKeys() + for i := 0; i < value.Len(); i++ { + values = append(values, value.MapIndex(keys[i]).Interface()) + } + } else { + for i := 0; i < value.Len(); i++ { + values = append(values, value.Index(i).Interface()) + } + } + + return values +} + +func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to consist of", matcher.Elements) +} + +func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to consist of", matcher.Elements) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go new file mode 100644 index 000000000..4159335d0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -0,0 +1,56 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ContainElementMatcher struct { + Element interface{} +} + +func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. 
Got:\n%s", format.Object(actual, 1)) + } + + elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) + if !elementIsMatcher { + elemMatcher = &EqualMatcher{Expected: matcher.Element} + } + + value := reflect.ValueOf(actual) + var keys []reflect.Value + if isMap(actual) { + keys = value.MapKeys() + } + var lastError error + for i := 0; i < value.Len(); i++ { + var success bool + var err error + if isMap(actual) { + success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface()) + } else { + success, err = elemMatcher.Match(value.Index(i).Interface()) + } + if err != nil { + lastError = err + continue + } + if success { + return true, nil + } + } + + return false, lastError +} + +func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain element matching", matcher.Element) +} + +func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain element matching", matcher.Element) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go new file mode 100644 index 000000000..f8dc41e74 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go @@ -0,0 +1,38 @@ +package matchers + +import ( + "fmt" + "strings" + + "github.com/onsi/gomega/format" +) + +type ContainSubstringMatcher struct { + Substr string + Args []interface{} +} + +func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + + return strings.Contains(actualString, matcher.stringToMatch()), nil +} + +func (matcher *ContainSubstringMatcher) stringToMatch() string { + stringToMatch := matcher.Substr + if len(matcher.Args) > 0 { + stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...) + } + return stringToMatch +} + +func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain substring", matcher.stringToMatch()) +} + +func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain substring", matcher.stringToMatch()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go new file mode 100644 index 000000000..befb7bdfd --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go @@ -0,0 +1,42 @@ +package matchers + +import ( + "bytes" + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type EqualMatcher struct { + Expected interface{} +} + +func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + // Shortcut for byte slices. + // Comparing long byte slices with reflect.DeepEqual is very slow, + // so use bytes.Equal if actual and expected are both byte slices. 
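    // Note (illustrative, not part of the vendored file): because of this
    // fast path, comparing two large []byte payloads with Equal uses
    // bytes.Equal rather than reflect.DeepEqual, e.g.:
    //
    //     payload := bytes.Repeat([]byte{0xAB}, 1<<20)
    //     g.Expect(payload).To(gomega.Equal(bytes.Repeat([]byte{0xAB}, 1<<20)))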
+ if actualByteSlice, ok := actual.([]byte); ok { + if expectedByteSlice, ok := matcher.Expected.([]byte); ok { + return bytes.Equal(actualByteSlice, expectedByteSlice), nil + } + } + return reflect.DeepEqual(actual, matcher.Expected), nil +} + +func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) { + actualString, actualOK := actual.(string) + expectedString, expectedOK := matcher.Expected.(string) + if actualOK && expectedOK { + return format.MessageWithDiff(actualString, "to equal", expectedString) + } + + return format.Message(actual, "to equal", matcher.Expected) +} + +func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to equal", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go new file mode 100644 index 000000000..7ace93dc3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go @@ -0,0 +1,28 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveCapMatcher struct { + Count int +} + +func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := capOf(actual) + if !ok { + return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == matcher.Count, nil +} + +func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count) +} + +func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go new file mode 100644 index 000000000..ea5b92336 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type HaveKeyMatcher struct { + Key interface{} +} + +func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKey matcher expects a map. 
Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + return true, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "to have key matching", matcher.Key) + default: + return format.Message(actual, "to have key", matcher.Key) + } +} + +func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + switch matcher.Key.(type) { + case omegaMatcher: + return format.Message(actual, "not to have key matching", matcher.Key) + default: + return format.Message(actual, "not to have key", matcher.Key) + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go new file mode 100644 index 000000000..06355b1e9 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -0,0 +1,74 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type HaveKeyWithValueMatcher struct { + Key interface{} + Value interface{} +} + +func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(actual) { + return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. Got:%s", format.Object(actual, 1)) + } + + keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) + if !keyIsMatcher { + keyMatcher = &EqualMatcher{Expected: matcher.Key} + } + + valueMatcher, valueIsMatcher := matcher.Value.(omegaMatcher) + if !valueIsMatcher { + valueMatcher = &EqualMatcher{Expected: matcher.Value} + } + + keys := reflect.ValueOf(actual).MapKeys() + for i := 0; i < len(keys); i++ { + success, err := keyMatcher.Match(keys[i].Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error()) + } + if success { + actualValue := reflect.ValueOf(actual).MapIndex(keys[i]) + success, err := valueMatcher.Match(actualValue.Interface()) + if err != nil { + return false, fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error()) + } + return success, nil + } + } + + return false, nil +} + +func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) { + str := "to have {key: value}" + if _, ok := matcher.Key.(omegaMatcher); ok { + str += " matching" + } else if _, ok := matcher.Value.(omegaMatcher); ok { + str += " matching" + } + + expect := make(map[interface{}]interface{}, 1) + expect[matcher.Key] = matcher.Value + return format.Message(actual, str, expect) +} + +func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + kStr := "not to have key" + if _, ok := matcher.Key.(omegaMatcher); ok { + kStr = "not to have key matching" + } + + vStr := "or that key's value not be" + if _, ok := matcher.Value.(omegaMatcher); ok { + vStr = "or to have that key's value not matching" + } + + return format.Message(actual, kStr, matcher.Key, vStr, matcher.Value) +} diff --git 
a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go new file mode 100644 index 000000000..ee4276189 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go @@ -0,0 +1,28 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveLenMatcher struct { + Count int +} + +func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { + length, ok := lengthOf(actual) + if !ok { + return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + } + + return length == matcher.Count, nil +} + +func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count) +} + +func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go new file mode 100644 index 000000000..bef00ae21 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveOccurredMatcher struct { +} + +func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) { + // is purely nil? + if actual == nil { + return false, nil + } + + // must be an 'error' type + if !isError(actual) { + return false, fmt.Errorf("Expected an error-type. Got:\n%s", format.Object(actual, 1)) + } + + // must be non-nil (or a pointer to a non-nil) + return !isNil(actual), nil +} + +func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected an error to have occurred. Got:\n%s", format.Object(actual, 1)) +} + +func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Unexpected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "occurred") +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go new file mode 100644 index 000000000..1d8e80270 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go @@ -0,0 +1,36 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HavePrefixMatcher struct { + Prefix string + Args []interface{} +} + +func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + prefix := matcher.prefix() + return len(actualString) >= len(prefix) && actualString[0:len(prefix)] == prefix, nil +} + +func (matcher *HavePrefixMatcher) prefix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Prefix, matcher.Args...) 
+ } + return matcher.Prefix +} + +func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have prefix", matcher.prefix()) +} + +func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have prefix", matcher.prefix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go new file mode 100644 index 000000000..40a3526eb --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go @@ -0,0 +1,36 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveSuffixMatcher struct { + Suffix string + Args []interface{} +} + +func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) + } + suffix := matcher.suffix() + return len(actualString) >= len(suffix) && actualString[len(actualString)-len(suffix):] == suffix, nil +} + +func (matcher *HaveSuffixMatcher) suffix() string { + if len(matcher.Args) > 0 { + return fmt.Sprintf(matcher.Suffix, matcher.Args...) + } + return matcher.Suffix +} + +func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to have suffix", matcher.suffix()) +} + +func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to have suffix", matcher.suffix()) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go new file mode 100644 index 000000000..07499ac95 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -0,0 +1,51 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type MatchErrorMatcher struct { + Expected interface{} +} + +func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) { + if isNil(actual) { + return false, fmt.Errorf("Expected an error, got nil") + } + + if !isError(actual) { + return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1)) + } + + actualErr := actual.(error) + + if isError(matcher.Expected) { + return reflect.DeepEqual(actualErr, matcher.Expected), nil + } + + if isString(matcher.Expected) { + return actualErr.Error() == matcher.Expected, nil + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + if matcher.Expected != nil { + subMatcher, hasSubMatcher = (matcher.Expected).(omegaMatcher) + if hasSubMatcher { + return subMatcher.Match(actualErr.Error()) + } + } + + return false, fmt.Errorf("MatchError must be passed an error, string, or Matcher that can match on strings. 
Got:\n%s", format.Object(matcher.Expected, 1)) +} + +func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match error", matcher.Expected) +} + +func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match error", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go new file mode 100644 index 000000000..f962f139f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go @@ -0,0 +1,65 @@ +package matchers + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/onsi/gomega/format" +) + +type MatchJSONMatcher struct { + JSONToMatch interface{} + firstFailurePath []interface{} +} + +func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.prettyPrint(actual) + if err != nil { + return false, err + } + + var aval interface{} + var eval interface{} + + // this is guarded by prettyPrint + json.Unmarshal([]byte(actualString), &aval) + json.Unmarshal([]byte(expectedString), &eval) + var equal bool + equal, matcher.firstFailurePath = deepEqual(aval, eval) + return equal, nil +} + +func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.prettyPrint(actual) + return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath) +} + +func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.prettyPrint(actual) + return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath) +} + +func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, ok := toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok := toString(matcher.JSONToMatch) + if !ok { + return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. 
Got expected:\n%s", format.Object(matcher.JSONToMatch, 1)) + } + + abuf := new(bytes.Buffer) + ebuf := new(bytes.Buffer) + + if err := json.Indent(abuf, []byte(actualString), "", " "); err != nil { + return "", "", fmt.Errorf("Actual '%s' should be valid JSON, but it is not.\nUnderlying error:%s", actualString, err) + } + + if err := json.Indent(ebuf, []byte(expectedString), "", " "); err != nil { + return "", "", fmt.Errorf("Expected '%s' should be valid JSON, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return abuf.String(), ebuf.String(), nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go new file mode 100644 index 000000000..adac5db6b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go @@ -0,0 +1,43 @@ +package matchers + +import ( + "fmt" + "regexp" + + "github.com/onsi/gomega/format" +) + +type MatchRegexpMatcher struct { + Regexp string + Args []interface{} +} + +func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) { + actualString, ok := toString(actual) + if !ok { + return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1)) + } + + match, err := regexp.Match(matcher.regexp(), []byte(actualString)) + if err != nil { + return false, fmt.Errorf("RegExp match failed to compile with error:\n\t%s", err.Error()) + } + + return match, nil +} + +func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match regular expression", matcher.regexp()) +} + +func (matcher *MatchRegexpMatcher) regexp() string { + re := matcher.Regexp + if len(matcher.Args) > 0 { + re = fmt.Sprintf(matcher.Regexp, matcher.Args...) 
+ } + return re +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go new file mode 100644 index 000000000..5c815f5af --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go @@ -0,0 +1,134 @@ +package matchers + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + + "github.com/onsi/gomega/format" + "golang.org/x/net/html/charset" +) + +type MatchXMLMatcher struct { + XMLToMatch interface{} +} + +func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.formattedPrint(actual) + if err != nil { + return false, err + } + + aval, err := parseXmlContent(actualString) + if err != nil { + return false, fmt.Errorf("Actual '%s' should be valid XML, but it is not.\nUnderlying error:%s", actualString, err) + } + + eval, err := parseXmlContent(expectedString) + if err != nil { + return false, fmt.Errorf("Expected '%s' should be valid XML, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return reflect.DeepEqual(aval, eval), nil +} + +func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.formattedPrint(actual) + return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString) +} + +func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.formattedPrint(actual) + return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString) +} + +func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) { + var ok bool + actualString, ok = toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok = toString(matcher.XMLToMatch) + if !ok { + return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.XMLToMatch, 1)) + } + return actualString, expectedString, nil +} + +func parseXmlContent(content string) (*xmlNode, error) { + allNodes := []*xmlNode{} + + dec := newXmlDecoder(strings.NewReader(content)) + for { + tok, err := dec.Token() + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("failed to decode next token: %v", err) // untested section + } + + lastNodeIndex := len(allNodes) - 1 + var lastNode *xmlNode + if len(allNodes) > 0 { + lastNode = allNodes[lastNodeIndex] + } else { + lastNode = &xmlNode{} + } + + switch tok := tok.(type) { + case xml.StartElement: + attrs := attributesSlice(tok.Attr) + sort.Sort(attrs) + allNodes = append(allNodes, &xmlNode{XMLName: tok.Name, XMLAttr: tok.Attr}) + case xml.EndElement: + if len(allNodes) > 1 { + allNodes[lastNodeIndex-1].Nodes = append(allNodes[lastNodeIndex-1].Nodes, lastNode) + allNodes = allNodes[:lastNodeIndex] + } + case xml.CharData: + lastNode.Content = append(lastNode.Content, tok.Copy()...) 
+ case xml.Comment: + lastNode.Comments = append(lastNode.Comments, tok.Copy()) // untested section + case xml.ProcInst: + lastNode.ProcInsts = append(lastNode.ProcInsts, tok.Copy()) + } + } + + if len(allNodes) == 0 { + return nil, errors.New("found no nodes") + } + firstNode := allNodes[0] + trimParentNodesContentSpaces(firstNode) + + return firstNode, nil +} + +func newXmlDecoder(reader io.Reader) *xml.Decoder { + dec := xml.NewDecoder(reader) + dec.CharsetReader = charset.NewReaderLabel + return dec +} + +func trimParentNodesContentSpaces(node *xmlNode) { + if len(node.Nodes) > 0 { + node.Content = bytes.TrimSpace(node.Content) + for _, childNode := range node.Nodes { + trimParentNodesContentSpaces(childNode) + } + } +} + +type xmlNode struct { + XMLName xml.Name + Comments []xml.Comment + ProcInsts []xml.ProcInst + XMLAttr []xml.Attr + Content []byte + Nodes []*xmlNode +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go new file mode 100644 index 000000000..0c83c2b63 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -0,0 +1,76 @@ +package matchers + +import ( + "fmt" + "strings" + + "github.com/onsi/gomega/format" + "gopkg.in/yaml.v2" +) + +type MatchYAMLMatcher struct { + YAMLToMatch interface{} + firstFailurePath []interface{} +} + +func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.toStrings(actual) + if err != nil { + return false, err + } + + var aval interface{} + var eval interface{} + + if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil { + return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err) + } + if err := yaml.Unmarshal([]byte(expectedString), &eval); err != nil { + return false, fmt.Errorf("Expected '%s' should be valid YAML, but it is not.\nUnderlying error:%s", expectedString, err) + } + + var equal bool + equal, matcher.firstFailurePath = deepEqual(aval, eval) + return equal, nil +} + +func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.toNormalisedStrings(actual) + return formattedMessage(format.Message(actualString, "to match YAML of", expectedString), matcher.firstFailurePath) +} + +func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.toNormalisedStrings(actual) + return formattedMessage(format.Message(actualString, "not to match YAML of", expectedString), matcher.firstFailurePath) +} + +func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, expectedString, err := matcher.toStrings(actual) + return normalise(actualString), normalise(expectedString), err +} + +func normalise(input string) string { + var val interface{} + err := yaml.Unmarshal([]byte(input), &val) + if err != nil { + panic(err) // unreachable since Match already calls Unmarshal + } + output, err := yaml.Marshal(val) + if err != nil { + panic(err) // untested section, unreachable since we Unmarshal above + } + return strings.TrimSpace(string(output)) +} + +func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { + actualString, ok := toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchYAMLMatcher 
matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok := toString(matcher.YAMLToMatch) + if !ok { + return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.YAMLToMatch, 1)) + } + + return actualString, expectedString, nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go new file mode 100644 index 000000000..2c91670bd --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/not.go @@ -0,0 +1,30 @@ +package matchers + +import ( + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type NotMatcher struct { + Matcher types.GomegaMatcher +} + +func (m *NotMatcher) Match(actual interface{}) (bool, error) { + success, err := m.Matcher.Match(actual) + if err != nil { + return false, err + } + return !success, nil +} + +func (m *NotMatcher) FailureMessage(actual interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.Matcher.FailureMessage(actual) // works beautifully +} + +func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value +} diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go new file mode 100644 index 000000000..3bf799800 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/or.go @@ -0,0 +1,67 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type OrMatcher struct { + Matchers []types.GomegaMatcher + + // state + firstSuccessfulMatcher types.GomegaMatcher +} + +func (m *OrMatcher) Match(actual interface{}) (success bool, err error) { + m.firstSuccessfulMatcher = nil + for _, matcher := range m.Matchers { + success, err := matcher.Match(actual) + if err != nil { + return false, err + } + if success { + m.firstSuccessfulMatcher = matcher + return true, nil + } + } + return false, nil +} + +func (m *OrMatcher) FailureMessage(actual interface{}) (message string) { + // not the most beautiful list of matchers, but not bad either... + return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers)) +} + +func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.firstSuccessfulMatcher.NegatedFailureMessage(actual) +} + +func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + /* + Example with 3 matchers: A, B, C + + Match evaluates them: F, T, => T + So match is currently T, what should MatchMayChangeInTheFuture() return? + Seems like it only depends on B, since currently B MUST change to allow the result to become F + + Match eval: F, F, F => F + So match is currently F, what should MatchMayChangeInTheFuture() return? + Seems to depend on ANY of them being able to change to T. + */ + + if m.firstSuccessfulMatcher != nil { + // one of the matchers succeeded.. it must be able to change in order to affect the result + return oraclematcher.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual) + } else { + // so all matchers failed.. Any one of them changing would change the result. 
+ for _, matcher := range m.Matchers { + if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { + return true + } + } + return false // none of were going to change + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go new file mode 100644 index 000000000..640f4db1a --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go @@ -0,0 +1,46 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type PanicMatcher struct { + object interface{} +} + +func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return false, fmt.Errorf("PanicMatcher expects a non-nil actual.") + } + + actualType := reflect.TypeOf(actual) + if actualType.Kind() != reflect.Func { + return false, fmt.Errorf("PanicMatcher expects a function. Got:\n%s", format.Object(actual, 1)) + } + if !(actualType.NumIn() == 0 && actualType.NumOut() == 0) { + return false, fmt.Errorf("PanicMatcher expects a function with no arguments and no return value. Got:\n%s", format.Object(actual, 1)) + } + + success = false + defer func() { + if e := recover(); e != nil { + matcher.object = e + success = true + } + }() + + reflect.ValueOf(actual).Call([]reflect.Value{}) + + return +} + +func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to panic") +} + +func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1))) +} diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go new file mode 100644 index 000000000..2018a6128 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -0,0 +1,128 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type ReceiveMatcher struct { + Arg interface{} + receivedValue reflect.Value + channelClosed bool +} + +func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) { + if !isChan(actual) { + return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1)) + } + + channelType := reflect.TypeOf(actual) + channelValue := reflect.ValueOf(actual) + + if channelType.ChanDir() == reflect.SendDir { + return false, fmt.Errorf("ReceiveMatcher matcher cannot be passed a send-only channel. 
Got:\n%s", format.Object(actual, 1)) + } + + var subMatcher omegaMatcher + var hasSubMatcher bool + + if matcher.Arg != nil { + subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + if !hasSubMatcher { + argType := reflect.TypeOf(matcher.Arg) + if argType.Kind() != reflect.Ptr { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) + } + } + } + + winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ + {Dir: reflect.SelectRecv, Chan: channelValue}, + {Dir: reflect.SelectDefault}, + }) + + var closed bool + var didReceive bool + if winnerIndex == 0 { + closed = !open + didReceive = open + } + matcher.channelClosed = closed + + if closed { + return false, nil + } + + if hasSubMatcher { + if didReceive { + matcher.receivedValue = value + return subMatcher.Match(matcher.receivedValue.Interface()) + } + return false, nil + } + + if didReceive { + if matcher.Arg != nil { + outValue := reflect.ValueOf(matcher.Arg) + + if value.Type().AssignableTo(outValue.Elem().Type()) { + outValue.Elem().Set(value) + return true, nil + } + if value.Type().Kind() == reflect.Interface && value.Elem().Type().AssignableTo(outValue.Elem().Type()) { + outValue.Elem().Set(value.Elem()) + return true, nil + } else { + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1)) + } + + } + + return true, nil + } + return false, nil +} + +func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." + } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.FailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." + } + return format.Message(actual, "to receive something."+closedAddendum) +} + +func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { + subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + + closedAddendum := "" + if matcher.channelClosed { + closedAddendum = " The channel is closed." + } + + if hasSubMatcher { + if matcher.receivedValue.IsValid() { + return subMatcher.NegatedFailureMessage(matcher.receivedValue.Interface()) + } + return "When passed a matcher, ReceiveMatcher's channel *must* receive something." 
+ } + return format.Message(actual, "not to receive anything."+closedAddendum) +} + +func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { + if !isChan(actual) { + return false + } + + return !matcher.channelClosed +} diff --git a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go new file mode 100644 index 000000000..639295684 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go @@ -0,0 +1,92 @@ +package matchers + +import ( + "fmt" + "reflect" + "strings" +) + +func formattedMessage(comparisonMessage string, failurePath []interface{}) string { + var diffMessage string + if len(failurePath) == 0 { + diffMessage = "" + } else { + diffMessage = fmt.Sprintf("\n\nfirst mismatched key: %s", formattedFailurePath(failurePath)) + } + return fmt.Sprintf("%s%s", comparisonMessage, diffMessage) +} + +func formattedFailurePath(failurePath []interface{}) string { + formattedPaths := []string{} + for i := len(failurePath) - 1; i >= 0; i-- { + switch p := failurePath[i].(type) { + case int: + formattedPaths = append(formattedPaths, fmt.Sprintf(`[%d]`, p)) + default: + if i != len(failurePath)-1 { + formattedPaths = append(formattedPaths, ".") + } + formattedPaths = append(formattedPaths, fmt.Sprintf(`"%s"`, p)) + } + } + return strings.Join(formattedPaths, "") +} + +func deepEqual(a interface{}, b interface{}) (bool, []interface{}) { + var errorPath []interface{} + if reflect.TypeOf(a) != reflect.TypeOf(b) { + return false, errorPath + } + + switch a.(type) { + case []interface{}: + if len(a.([]interface{})) != len(b.([]interface{})) { + return false, errorPath + } + + for i, v := range a.([]interface{}) { + elementEqual, keyPath := deepEqual(v, b.([]interface{})[i]) + if !elementEqual { + return false, append(keyPath, i) + } + } + return true, errorPath + + case map[interface{}]interface{}: + if len(a.(map[interface{}]interface{})) != len(b.(map[interface{}]interface{})) { + return false, errorPath + } + + for k, v1 := range a.(map[interface{}]interface{}) { + v2, ok := b.(map[interface{}]interface{})[k] + if !ok { + return false, errorPath + } + elementEqual, keyPath := deepEqual(v1, v2) + if !elementEqual { + return false, append(keyPath, k) + } + } + return true, errorPath + + case map[string]interface{}: + if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) { + return false, errorPath + } + + for k, v1 := range a.(map[string]interface{}) { + v2, ok := b.(map[string]interface{})[k] + if !ok { + return false, errorPath + } + elementEqual, keyPath := deepEqual(v1, v2) + if !elementEqual { + return false, append(keyPath, k) + } + } + return true, errorPath + + default: + return a == b, errorPath + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go new file mode 100644 index 000000000..721ed5529 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go @@ -0,0 +1,33 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type SucceedMatcher struct { +} + +func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) { + // is purely nil? + if actual == nil { + return true, nil + } + + // must be an 'error' type + if !isError(actual) { + return false, fmt.Errorf("Expected an error-type. 
Got:\n%s", format.Object(actual, 1)) + } + + // must be nil (or a pointer to a nil) + return isNil(actual), nil +} + +func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1)) +} + +func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return "Expected failure, but got no error." +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go new file mode 100644 index 000000000..8aaf8759d --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go @@ -0,0 +1,41 @@ +package bipartitegraph + +import "errors" +import "fmt" + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . "github.com/onsi/gomega/matchers/support/goraph/edge" + +type BipartiteGraph struct { + Left NodeOrderedSet + Right NodeOrderedSet + Edges EdgeSet +} + +func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) { + left := NodeOrderedSet{} + for i := range leftValues { + left = append(left, Node{Id: i}) + } + + right := NodeOrderedSet{} + for j := range rightValues { + right = append(right, Node{Id: j + len(left)}) + } + + edges := EdgeSet{} + for i, leftValue := range leftValues { + for j, rightValue := range rightValues { + neighbours, err := neighbours(leftValue, rightValue) + if err != nil { + return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error())) + } + + if neighbours { + edges = append(edges, Edge{Node1: left[i], Node2: right[j]}) + } + } + } + + return &BipartiteGraph{left, right, edges}, nil +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go new file mode 100644 index 000000000..8181f43a4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -0,0 +1,159 @@ +package bipartitegraph + +import . "github.com/onsi/gomega/matchers/support/goraph/node" +import . 
"github.com/onsi/gomega/matchers/support/goraph/edge" +import "github.com/onsi/gomega/matchers/support/goraph/util" + +func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) { + paths := bg.maximalDisjointSLAPCollection(matching) + + for len(paths) > 0 { + for _, path := range paths { + matching = matching.SymmetricDifference(path) + } + paths = bg.maximalDisjointSLAPCollection(matching) + } + + return +} + +func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (result []EdgeSet) { + guideLayers := bg.createSLAPGuideLayers(matching) + if len(guideLayers) == 0 { + return + } + + used := make(map[Node]bool) + + for _, u := range guideLayers[len(guideLayers)-1] { + slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used) + if found { + for _, edge := range slap { + used[edge.Node1] = true + used[edge.Node2] = true + } + result = append(result, slap) + } + } + + return +} + +func (bg *BipartiteGraph) findDisjointSLAP( + start Node, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) ([]Edge, bool) { + return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used) +} + +func (bg *BipartiteGraph) findDisjointSLAPHelper( + currentNode Node, + currentSLAP EdgeSet, + currentLevel int, + matching EdgeSet, + guideLayers []NodeOrderedSet, + used map[Node]bool, +) (EdgeSet, bool) { + used[currentNode] = true + + if currentLevel == 0 { + return currentSLAP, true + } + + for _, nextNode := range guideLayers[currentLevel-1] { + if used[nextNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(currentNode, nextNode) + if !found { + continue + } + + if matching.Contains(edge) == util.Odd(currentLevel) { + continue + } + + currentSLAP = append(currentSLAP, edge) + slap, found := bg.findDisjointSLAPHelper(nextNode, currentSLAP, currentLevel-1, matching, guideLayers, used) + if found { + return slap, true + } + currentSLAP = currentSLAP[:len(currentSLAP)-1] + } + + used[currentNode] = false + return nil, false +} + +func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers []NodeOrderedSet) { + used := make(map[Node]bool) + currentLayer := NodeOrderedSet{} + + for _, node := range bg.Left { + if matching.Free(node) { + used[node] = true + currentLayer = append(currentLayer, node) + } + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } + guideLayers = append(guideLayers, currentLayer) + + done := false + + for !done { + lastLayer := currentLayer + currentLayer = NodeOrderedSet{} + + if util.Odd(len(guideLayers)) { + for _, leftNode := range lastLayer { + for _, rightNode := range bg.Right { + if used[rightNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, rightNode) + used[rightNode] = true + + if matching.Free(rightNode) { + done = true + } + } + } + } else { + for _, rightNode := range lastLayer { + for _, leftNode := range bg.Left { + if used[leftNode] { + continue + } + + edge, found := bg.Edges.FindByNodes(leftNode, rightNode) + if !found || !matching.Contains(edge) { + continue + } + + currentLayer = append(currentLayer, leftNode) + used[leftNode] = true + } + } + + } + + if len(currentLayer) == 0 { + return []NodeOrderedSet{} + } + guideLayers = append(guideLayers, currentLayer) + } + + return +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go 
b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go new file mode 100644 index 000000000..4fd15cc06 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go @@ -0,0 +1,61 @@ +package edge + +import . "github.com/onsi/gomega/matchers/support/goraph/node" + +type Edge struct { + Node1 Node + Node2 Node +} + +type EdgeSet []Edge + +func (ec EdgeSet) Free(node Node) bool { + for _, e := range ec { + if e.Node1 == node || e.Node2 == node { + return false + } + } + + return true +} + +func (ec EdgeSet) Contains(edge Edge) bool { + for _, e := range ec { + if e == edge { + return true + } + } + + return false +} + +func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) { + for _, e := range ec { + if (e.Node1 == node1 && e.Node2 == node2) || (e.Node1 == node2 && e.Node2 == node1) { + return e, true + } + } + + return Edge{}, false +} + +func (ec EdgeSet) SymmetricDifference(ec2 EdgeSet) EdgeSet { + edgesToInclude := make(map[Edge]bool) + + for _, e := range ec { + edgesToInclude[e] = true + } + + for _, e := range ec2 { + edgesToInclude[e] = !edgesToInclude[e] + } + + result := EdgeSet{} + for e, include := range edgesToInclude { + if include { + result = append(result, e) + } + } + + return result +} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go new file mode 100644 index 000000000..800c2ea8c --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go @@ -0,0 +1,7 @@ +package node + +type Node struct { + Id int +} + +type NodeOrderedSet []Node diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go new file mode 100644 index 000000000..d76a1ee00 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/util/util.go @@ -0,0 +1,7 @@ +package util + +import "math" + +func Odd(n int) bool { + return math.Mod(float64(n), 2.0) == 1.0 +} diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go new file mode 100644 index 000000000..75afcd844 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -0,0 +1,179 @@ +/* +Gomega matchers + +This package implements the Gomega matchers and does not typically need to be imported. 
+See the docs for Gomega for documentation on the matchers + +http://onsi.github.io/gomega/ +*/ +package matchers + +import ( + "encoding/json" + "fmt" + "reflect" +) + +type omegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +} + +func isBool(a interface{}) bool { + return reflect.TypeOf(a).Kind() == reflect.Bool +} + +func isNumber(a interface{}) bool { + if a == nil { + return false + } + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Float64 +} + +func isInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Int <= kind && kind <= reflect.Int64 +} + +func isUnsignedInteger(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Uint <= kind && kind <= reflect.Uint64 +} + +func isFloat(a interface{}) bool { + kind := reflect.TypeOf(a).Kind() + return reflect.Float32 <= kind && kind <= reflect.Float64 +} + +func toInteger(a interface{}) int64 { + if isInteger(a) { + return reflect.ValueOf(a).Int() + } else if isUnsignedInteger(a) { + return int64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return int64(reflect.ValueOf(a).Float()) + } + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) +} + +func toUnsignedInteger(a interface{}) uint64 { + if isInteger(a) { + return uint64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return reflect.ValueOf(a).Uint() + } else if isFloat(a) { + return uint64(reflect.ValueOf(a).Float()) + } + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) +} + +func toFloat(a interface{}) float64 { + if isInteger(a) { + return float64(reflect.ValueOf(a).Int()) + } else if isUnsignedInteger(a) { + return float64(reflect.ValueOf(a).Uint()) + } else if isFloat(a) { + return reflect.ValueOf(a).Float() + } + panic(fmt.Sprintf("Expected a number! 
Got <%T> %#v", a, a)) +} + +func isError(a interface{}) bool { + _, ok := a.(error) + return ok +} + +func isChan(a interface{}) bool { + if isNil(a) { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Chan +} + +func isMap(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.Map +} + +func isArrayOrSlice(a interface{}) bool { + if a == nil { + return false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Array, reflect.Slice: + return true + default: + return false + } +} + +func isString(a interface{}) bool { + if a == nil { + return false + } + return reflect.TypeOf(a).Kind() == reflect.String +} + +func toString(a interface{}) (string, bool) { + aString, isString := a.(string) + if isString { + return aString, true + } + + aBytes, isBytes := a.([]byte) + if isBytes { + return string(aBytes), true + } + + aStringer, isStringer := a.(fmt.Stringer) + if isStringer { + return aStringer.String(), true + } + + aJSONRawMessage, isJSONRawMessage := a.(json.RawMessage) + if isJSONRawMessage { + return string(aJSONRawMessage), true + } + + return "", false +} + +func lengthOf(a interface{}) (int, bool) { + if a == nil { + return 0, false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice: + return reflect.ValueOf(a).Len(), true + default: + return 0, false + } +} +func capOf(a interface{}) (int, bool) { + if a == nil { + return 0, false + } + switch reflect.TypeOf(a).Kind() { + case reflect.Array, reflect.Chan, reflect.Slice: + return reflect.ValueOf(a).Cap(), true + default: + return 0, false + } +} + +func isNil(a interface{}) bool { + if a == nil { + return true + } + + switch reflect.TypeOf(a).Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return reflect.ValueOf(a).IsNil() + } + + return false +} diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go new file mode 100644 index 000000000..8e58d8a0f --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go @@ -0,0 +1,72 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/internal/oraclematcher" + "github.com/onsi/gomega/types" +) + +type WithTransformMatcher struct { + // input + Transform interface{} // must be a function of one parameter that returns one value + Matcher types.GomegaMatcher + + // cached value + transformArgType reflect.Type + + // state + transformedValue interface{} +} + +func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher { + if transform == nil { + panic("transform function cannot be nil") + } + txType := reflect.TypeOf(transform) + if txType.NumIn() != 1 { + panic("transform function must have 1 argument") + } + if txType.NumOut() != 1 { + panic("transform function must have 1 return value") + } + + return &WithTransformMatcher{ + Transform: transform, + Matcher: matcher, + transformArgType: reflect.TypeOf(transform).In(0), + } +} + +func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { + // return error if actual's type is incompatible with Transform function's argument type + actualType := reflect.TypeOf(actual) + if !actualType.AssignableTo(m.transformArgType) { + return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType) + } + + // call the Transform function with `actual` + fn := 
reflect.ValueOf(m.Transform) + result := fn.Call([]reflect.Value{reflect.ValueOf(actual)}) + m.transformedValue = result[0].Interface() // expect exactly one value + + return m.Matcher.Match(m.transformedValue) +} + +func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) { + return m.Matcher.FailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(m.transformedValue) +} + +func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool { + // TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.) + // + // Querying the next matcher is fine if the transformer always will return the same value. + // But if the transformer is non-deterministic and returns a different value each time, then there + // is no point in querying the next matcher, since it can only comment on the last transformed value. + return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue) +} diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go new file mode 100644 index 000000000..ac59a3a5a --- /dev/null +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -0,0 +1,26 @@ +package types + +type TWithHelper interface { + Helper() +} + +type GomegaFailHandler func(message string, callerSkip ...int) + +type GomegaFailWrapper struct { + Fail GomegaFailHandler + TWithHelper TWithHelper +} + +//A simple *testing.T interface wrapper +type GomegaTestingT interface { + Fatalf(format string, args ...interface{}) +} + +//All Gomega matchers must implement the GomegaMatcher interface +// +//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers +type GomegaMatcher interface { + Match(actual interface{}) (success bool, err error) + FailureMessage(actual interface{}) (message string) + NegatedFailureMessage(actual interface{}) (message string) +} diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap deleted file mode 100644 index ba611cb21..000000000 --- a/vendor/github.com/opencontainers/go-digest/.mailmap +++ /dev/null @@ -1 +0,0 @@ -Stephen J Day diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml deleted file mode 100644 index 45fa4b9ec..000000000 --- a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml +++ /dev/null @@ -1,12 +0,0 @@ -approve_by_comment: true -approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)' -reject_regex: ^Rejected -reset_on_push: true -author_approval: ignored -signed_off_by: - required: true -reviewers: - teams: - - go-digest-maintainers - name: default - required: 2 diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml deleted file mode 100644 index 7ea4ed1d2..000000000 --- a/vendor/github.com/opencontainers/go-digest/.travis.yml +++ /dev/null @@ -1,4 +0,0 @@ -language: go -go: - - 1.7 - - master diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md deleted file mode 100644 index e4d962ac1..000000000 --- a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md +++ /dev/null @@ -1,72 +0,0 @@ -# Contributing to Docker open source projects - -Want to hack on this project? Awesome! 
Here are instructions to get you started. - -This project is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -For an in-depth description of our contribution process, visit the -contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -1 Letterman Drive -Suite D4700 -San Francisco, CA, 94129 - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. 
diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS deleted file mode 100644 index 42a29795d..000000000 --- a/vendor/github.com/opencontainers/go-digest/MAINTAINERS +++ /dev/null @@ -1,9 +0,0 @@ -Aaron Lehmann (@aaronlehmann) -Brandon Philips (@philips) -Brendan Burns (@brendandburns) -Derek McGowan (@dmcgowan) -Jason Bouzane (@jbouzane) -John Starks (@jstarks) -Jonathan Boulle (@jonboulle) -Stephen Day (@stevvooe) -Vincent Batts (@vbatts) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md deleted file mode 100644 index 0f5a04092..000000000 --- a/vendor/github.com/opencontainers/go-digest/README.md +++ /dev/null @@ -1,104 +0,0 @@ -# go-digest - -[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) - -Common digest package used across the container ecosystem. - -Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. - -# What is a digest? - -A digest is just a hash. - -The most common use case for a digest is to create a content -identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) -systems: - -```go -id := digest.FromBytes([]byte("my content")) -``` - -In the example above, the id can be used to uniquely identify -the byte slice "my content". This allows two disparate applications -to agree on a verifiable identifier without having to trust one -another. - -An identifying digest can be verified, as follows: - -```go -if id != digest.FromBytes([]byte("my content")) { - return errors.New("the content has changed!") -} -``` - -A `Verifier` type can be used to handle cases where an `io.Reader` -makes more sense: - -```go -rd := getContent() -verifier := id.Verifier() -io.Copy(verifier, rd) - -if !verifier.Verified() { - return errors.New("the content has changed!") -} -``` - -Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this -can power a rich, safe, content distribution system. - -# Usage - -While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is -considered the best resource, a few important items need to be called -out when using this package. - -1. Make sure to import the hash implementations into your application - or the package will panic. You should have something like the - following in the main (or other entrypoint) of your application: - - ```go - import ( - _ "crypto/sha256" - _ "crypto/sha512" - ) - ``` - This may seem inconvenient but it allows you replace the hash - implementations with others, such as https://github.com/stevvooe/resumable. - -2. Even though `digest.Digest` may be assemable as a string, _always_ - verify your input with `digest.Parse` or use `Digest.Validate` - when accepting untrusted input. While there are measures to - avoid common problems, this will ensure you have valid digests - in the rest of your application. - -# Stability - -The Go API, at this stage, is considered stable, unless otherwise noted. - -As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest). 
- -# Contributing - -This package is considered fairly complete. It has been in production -in thousands (millions?) of deployments and is fairly battle-hardened. -New additions will be met with skepticism. If you think there is a -missing feature, please file a bug clearly describing the problem and -the alternatives you tried before submitting a PR. - -# Reporting security issues - -Please DO NOT file a public issue, instead send your report privately to -security@opencontainers.org. - -The maintainers take security seriously. If you discover a security issue, -please bring it to their attention right away! - -If you are reporting a security issue, do not create an issue or file a pull -request on GitHub. Instead, disclose the issue responsibly by sending an email -to security@opencontainers.org (which is inhabited only by the maintainers of -the various OCI projects). - -# Copyright and license - -Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. diff --git a/vendor/github.com/openshift/api/LICENSE b/vendor/github.com/openshift/api/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/openshift/api/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/api/apps/v1/doc.go b/vendor/github.com/openshift/api/apps/v1/doc.go new file mode 100644 index 000000000..edd16297b --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/apps/apis/apps +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=apps.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/apps/v1/generated.pb.go b/vendor/github.com/openshift/api/apps/v1/generated.pb.go new file mode 100644 index 000000000..53b61c61a --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/generated.pb.go @@ -0,0 +1,6494 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/openshift/api/apps/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + github.com/openshift/api/apps/v1/generated.proto + + It has these top-level messages: + CustomDeploymentStrategyParams + DeploymentCause + DeploymentCauseImageTrigger + DeploymentCondition + DeploymentConfig + DeploymentConfigList + DeploymentConfigRollback + DeploymentConfigRollbackSpec + DeploymentConfigSpec + DeploymentConfigStatus + DeploymentDetails + DeploymentLog + DeploymentLogOptions + DeploymentRequest + DeploymentStrategy + DeploymentTriggerImageChangeParams + DeploymentTriggerPolicies + DeploymentTriggerPolicy + ExecNewPodHook + LifecycleHook + RecreateDeploymentStrategyParams + RollingDeploymentStrategyParams + TagImageHook +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *CustomDeploymentStrategyParams) Reset() { *m = CustomDeploymentStrategyParams{} } +func (*CustomDeploymentStrategyParams) ProtoMessage() {} +func (*CustomDeploymentStrategyParams) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *DeploymentCause) Reset() { *m = DeploymentCause{} } +func (*DeploymentCause) ProtoMessage() {} +func (*DeploymentCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *DeploymentCauseImageTrigger) Reset() { *m = DeploymentCauseImageTrigger{} } +func (*DeploymentCauseImageTrigger) ProtoMessage() {} +func (*DeploymentCauseImageTrigger) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *DeploymentConfig) Reset() { *m = DeploymentConfig{} } +func (*DeploymentConfig) ProtoMessage() {} +func (*DeploymentConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DeploymentConfigList) Reset() { *m = DeploymentConfigList{} } +func (*DeploymentConfigList) ProtoMessage() {} +func (*DeploymentConfigList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *DeploymentConfigRollback) Reset() { *m = DeploymentConfigRollback{} } +func (*DeploymentConfigRollback) ProtoMessage() {} +func (*DeploymentConfigRollback) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{6} +} + +func (m *DeploymentConfigRollbackSpec) Reset() { *m = DeploymentConfigRollbackSpec{} } +func (*DeploymentConfigRollbackSpec) ProtoMessage() {} +func (*DeploymentConfigRollbackSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{7} +} + +func (m *DeploymentConfigSpec) 
Reset() { *m = DeploymentConfigSpec{} } +func (*DeploymentConfigSpec) ProtoMessage() {} +func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *DeploymentConfigStatus) Reset() { *m = DeploymentConfigStatus{} } +func (*DeploymentConfigStatus) ProtoMessage() {} +func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *DeploymentDetails) Reset() { *m = DeploymentDetails{} } +func (*DeploymentDetails) ProtoMessage() {} +func (*DeploymentDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *DeploymentLog) Reset() { *m = DeploymentLog{} } +func (*DeploymentLog) ProtoMessage() {} +func (*DeploymentLog) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *DeploymentLogOptions) Reset() { *m = DeploymentLogOptions{} } +func (*DeploymentLogOptions) ProtoMessage() {} +func (*DeploymentLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *DeploymentRequest) Reset() { *m = DeploymentRequest{} } +func (*DeploymentRequest) ProtoMessage() {} +func (*DeploymentRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *DeploymentTriggerImageChangeParams) Reset() { *m = DeploymentTriggerImageChangeParams{} } +func (*DeploymentTriggerImageChangeParams) ProtoMessage() {} +func (*DeploymentTriggerImageChangeParams) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{15} +} + +func (m *DeploymentTriggerPolicies) Reset() { *m = DeploymentTriggerPolicies{} } +func (*DeploymentTriggerPolicies) ProtoMessage() {} +func (*DeploymentTriggerPolicies) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{16} +} + +func (m *DeploymentTriggerPolicy) Reset() { *m = DeploymentTriggerPolicy{} } +func (*DeploymentTriggerPolicy) ProtoMessage() {} +func (*DeploymentTriggerPolicy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{17} +} + +func (m *ExecNewPodHook) Reset() { *m = ExecNewPodHook{} } +func (*ExecNewPodHook) ProtoMessage() {} +func (*ExecNewPodHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *LifecycleHook) Reset() { *m = LifecycleHook{} } +func (*LifecycleHook) ProtoMessage() {} +func (*LifecycleHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *RecreateDeploymentStrategyParams) Reset() { *m = RecreateDeploymentStrategyParams{} } +func (*RecreateDeploymentStrategyParams) ProtoMessage() {} +func (*RecreateDeploymentStrategyParams) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{20} +} + +func (m *RollingDeploymentStrategyParams) Reset() { *m = RollingDeploymentStrategyParams{} } +func (*RollingDeploymentStrategyParams) ProtoMessage() {} +func (*RollingDeploymentStrategyParams) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{21} +} + +func (m *TagImageHook) Reset() { *m = TagImageHook{} } +func (*TagImageHook) ProtoMessage() {} +func (*TagImageHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func init() { + proto.RegisterType((*CustomDeploymentStrategyParams)(nil), 
"github.com.openshift.api.apps.v1.CustomDeploymentStrategyParams") + proto.RegisterType((*DeploymentCause)(nil), "github.com.openshift.api.apps.v1.DeploymentCause") + proto.RegisterType((*DeploymentCauseImageTrigger)(nil), "github.com.openshift.api.apps.v1.DeploymentCauseImageTrigger") + proto.RegisterType((*DeploymentCondition)(nil), "github.com.openshift.api.apps.v1.DeploymentCondition") + proto.RegisterType((*DeploymentConfig)(nil), "github.com.openshift.api.apps.v1.DeploymentConfig") + proto.RegisterType((*DeploymentConfigList)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigList") + proto.RegisterType((*DeploymentConfigRollback)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback") + proto.RegisterType((*DeploymentConfigRollbackSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollbackSpec") + proto.RegisterType((*DeploymentConfigSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec") + proto.RegisterType((*DeploymentConfigStatus)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigStatus") + proto.RegisterType((*DeploymentDetails)(nil), "github.com.openshift.api.apps.v1.DeploymentDetails") + proto.RegisterType((*DeploymentLog)(nil), "github.com.openshift.api.apps.v1.DeploymentLog") + proto.RegisterType((*DeploymentLogOptions)(nil), "github.com.openshift.api.apps.v1.DeploymentLogOptions") + proto.RegisterType((*DeploymentRequest)(nil), "github.com.openshift.api.apps.v1.DeploymentRequest") + proto.RegisterType((*DeploymentStrategy)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy") + proto.RegisterType((*DeploymentTriggerImageChangeParams)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerImageChangeParams") + proto.RegisterType((*DeploymentTriggerPolicies)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicies") + proto.RegisterType((*DeploymentTriggerPolicy)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicy") + proto.RegisterType((*ExecNewPodHook)(nil), "github.com.openshift.api.apps.v1.ExecNewPodHook") + proto.RegisterType((*LifecycleHook)(nil), "github.com.openshift.api.apps.v1.LifecycleHook") + proto.RegisterType((*RecreateDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RecreateDeploymentStrategyParams") + proto.RegisterType((*RollingDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RollingDeploymentStrategyParams") + proto.RegisterType((*TagImageHook)(nil), "github.com.openshift.api.apps.v1.TagImageHook") +} +func (m *CustomDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + if len(m.Environment) > 0 { + for _, msg := range m.Environment { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Command) > 0 { + for _, s := range m.Command { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *DeploymentCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err 
:= m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCause) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.ImageTrigger != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ImageTrigger.Size())) + n1, err := m.ImageTrigger.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *DeploymentCauseImageTrigger) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCauseImageTrigger) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) + n2, err := m.From.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n3, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *DeploymentConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n6, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n7, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *DeploymentConfigList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n8, err := 
m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentConfigRollback) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigRollback) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if len(m.UpdatedAnnotations) > 0 { + keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) + for k := range m.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + for _, k := range keysForUpdatedAnnotations { + dAtA[i] = 0x12 + i++ + v := m.UpdatedAnnotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n9, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil +} + +func (m *DeploymentConfigRollbackSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigRollbackSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) + n10, err := m.From.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + dAtA[i] = 0x18 + i++ + if m.IncludeTriggers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x20 + i++ + if m.IncludeTemplate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x28 + i++ + if m.IncludeReplicationMeta { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x30 + i++ + if m.IncludeStrategy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *DeploymentConfigSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n11, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + if m.Triggers != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Triggers.Size())) + n12, err := m.Triggers.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + if 
m.RevisionHistoryLimit != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + dAtA[i] = 0x28 + i++ + if m.Test { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x30 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for _, k := range keysForSelector { + dAtA[i] = 0x3a + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.Template != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n13, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *DeploymentConfigStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LatestVersion)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if m.Details != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) + n14, err := m.Details.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + return i, nil +} + +func (m *DeploymentDetails) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentDetails) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + if len(m.Causes) > 0 { + for _, msg := range m.Causes { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentLog) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentLog) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *DeploymentLogOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentLogOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i += copy(dAtA[i:], m.Container) + dAtA[i] = 0x10 + i++ + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x18 + i++ + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SinceSeconds != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) + n15, err := m.SinceTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + dAtA[i] = 0x30 + i++ + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.TailLines != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + } + dAtA[i] = 0x48 + i++ + if m.NoWait { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.Version != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Version)) + } + return i, nil +} + +func (m *DeploymentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + if m.Latest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x18 + i++ + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.ExcludeTriggers) > 0 { + for _, s := range m.ExcludeTriggers { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.CustomParams != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CustomParams.Size())) + n16, err := m.CustomParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.RecreateParams != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RecreateParams.Size())) + n17, err := m.RecreateParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.RollingParams != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingParams.Size())) + n18, err := 
m.RollingParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) + n19, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for _, k := range keysForLabels { + dAtA[i] = 0x32 + i++ + v := m.Labels[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for _, k := range keysForAnnotations { + dAtA[i] = 0x3a + i++ + v := m.Annotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.ActiveDeadlineSeconds != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + } + return i, nil +} + +func (m *DeploymentTriggerImageChangeParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentTriggerImageChangeParams) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Automatic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.ContainerNames) > 0 { + for _, s := range m.ContainerNames { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) + n20, err := m.From.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImage))) + i += copy(dAtA[i:], m.LastTriggeredImage) + return i, nil +} + +func (m DeploymentTriggerPolicies) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m DeploymentTriggerPolicies) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, msg := range m { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentTriggerPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentTriggerPolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.ImageChangeParams != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ImageChangeParams.Size())) + n21, err := m.ImageChangeParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + return i, nil +} + +func (m *ExecNewPodHook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecNewPodHook) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Env) > 0 { + for _, msg := range m.Env { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i += copy(dAtA[i:], m.ContainerName) + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *LifecycleHook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LifecycleHook) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FailurePolicy))) + i += copy(dAtA[i:], m.FailurePolicy) + if m.ExecNewPod != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ExecNewPod.Size())) + n22, err := m.ExecNewPod.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if len(m.TagImages) > 0 { + for _, msg := range m.TagImages { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RecreateDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RecreateDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TimeoutSeconds != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + } + if m.Pre != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pre.Size())) + n23, err := m.Pre.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.Mid != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Mid.Size())) + n24, err := m.Mid.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.Post != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.Post.Size())) + n25, err := m.Post.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} + +func (m *RollingDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UpdatePeriodSeconds != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.UpdatePeriodSeconds)) + } + if m.IntervalSeconds != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntervalSeconds)) + } + if m.TimeoutSeconds != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + } + if m.MaxUnavailable != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n26, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.MaxSurge != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n27, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if m.Pre != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pre.Size())) + n28, err := m.Pre.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.Post != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Post.Size())) + n29, err := m.Post.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func (m *TagImageHook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagImageHook) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i += copy(dAtA[i:], m.ContainerName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.To.Size())) + n30, err := m.To.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *CustomDeploymentStrategyParams) Size() (n int) { + var l int + _ = l + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Environment) > 0 { + for _, e := range m.Environment { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentCause) Size() (n int) 
{ + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.ImageTrigger != nil { + l = m.ImageTrigger.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeploymentCauseImageTrigger) Size() (n int) { + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentConfig) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentConfigList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentConfigRollback) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentConfigRollbackSpec) Size() (n int) { + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Revision)) + n += 2 + n += 2 + n += 2 + n += 2 + return n +} + +func (m *DeploymentConfigSpec) Size() (n int) { + var l int + _ = l + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Triggers != nil { + l = m.Triggers.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Replicas)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + n += 2 + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *DeploymentConfigStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.LatestVersion)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if m.Details != nil { + l = m.Details.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + return n +} + +func (m *DeploymentDetails) Size() (n int) { + var l int + _ 
= l + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Causes) > 0 { + for _, e := range m.Causes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentLog) Size() (n int) { + var l int + _ = l + return n +} + +func (m *DeploymentLogOptions) Size() (n int) { + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + n += 2 + if m.Version != nil { + n += 1 + sovGenerated(uint64(*m.Version)) + } + return n +} + +func (m *DeploymentRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if len(m.ExcludeTriggers) > 0 { + for _, s := range m.ExcludeTriggers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.CustomParams != nil { + l = m.CustomParams.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RecreateParams != nil { + l = m.RecreateParams.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RollingParams != nil { + l = m.RollingParams.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + return n +} + +func (m *DeploymentTriggerImageChangeParams) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.ContainerNames) > 0 { + for _, s := range m.ContainerNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LastTriggeredImage) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m DeploymentTriggerPolicies) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, e := range m { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentTriggerPolicy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.ImageChangeParams != nil { + l = m.ImageChangeParams.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExecNewPodHook) Size() (n int) { + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + 
} + return n +} + +func (m *LifecycleHook) Size() (n int) { + var l int + _ = l + l = len(m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExecNewPod != nil { + l = m.ExecNewPod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TagImages) > 0 { + for _, e := range m.TagImages { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RecreateDeploymentStrategyParams) Size() (n int) { + var l int + _ = l + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if m.Pre != nil { + l = m.Pre.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Mid != nil { + l = m.Mid.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Post != nil { + l = m.Post.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingDeploymentStrategyParams) Size() (n int) { + var l int + _ = l + if m.UpdatePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.UpdatePeriodSeconds)) + } + if m.IntervalSeconds != nil { + n += 1 + sovGenerated(uint64(*m.IntervalSeconds)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pre != nil { + l = m.Pre.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Post != nil { + l = m.Post.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TagImageHook) Size() (n int) { + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CustomDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomDeploymentStrategyParams{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Environment:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Environment), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCause{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ImageTrigger:` + strings.Replace(fmt.Sprintf("%v", this.ImageTrigger), "DeploymentCauseImageTrigger", "DeploymentCauseImageTrigger", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCauseImageTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCauseImageTrigger{`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", 
this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentConfig{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigSpec", "DeploymentConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentConfigStatus", "DeploymentConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DeploymentConfig", "DeploymentConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigRollback) String() string { + if this == nil { + return "nil" + } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentConfigRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigRollbackSpec", "DeploymentConfigRollbackSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigRollbackSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentConfigRollbackSpec{`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `IncludeTriggers:` + fmt.Sprintf("%v", this.IncludeTriggers) + `,`, + `IncludeTemplate:` + fmt.Sprintf("%v", this.IncludeTemplate) + `,`, + `IncludeReplicationMeta:` + fmt.Sprintf("%v", this.IncludeReplicationMeta) + `,`, + `IncludeStrategy:` + fmt.Sprintf("%v", this.IncludeStrategy) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&DeploymentConfigSpec{`, + `Strategy:` + 
strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `Triggers:` + strings.Replace(fmt.Sprintf("%v", this.Triggers), "DeploymentTriggerPolicies", "DeploymentTriggerPolicies", 1) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Test:` + fmt.Sprintf("%v", this.Test) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentConfigStatus{`, + `LatestVersion:` + fmt.Sprintf("%v", this.LatestVersion) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "DeploymentDetails", "DeploymentDetails", 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentDetails) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentDetails{`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "DeploymentCause", "DeploymentCause", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentLog) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentLog{`, + `}`, + }, "") + return s +} +func (this *DeploymentLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `Version:` + valueToStringGenerated(this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Latest:` + fmt.Sprintf("%v", this.Latest) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `ExcludeTriggers:` + fmt.Sprintf("%v", this.ExcludeTriggers) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + 
} + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `CustomParams:` + strings.Replace(fmt.Sprintf("%v", this.CustomParams), "CustomDeploymentStrategyParams", "CustomDeploymentStrategyParams", 1) + `,`, + `RecreateParams:` + strings.Replace(fmt.Sprintf("%v", this.RecreateParams), "RecreateDeploymentStrategyParams", "RecreateDeploymentStrategyParams", 1) + `,`, + `RollingParams:` + strings.Replace(fmt.Sprintf("%v", this.RollingParams), "RollingDeploymentStrategyParams", "RollingDeploymentStrategyParams", 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "k8s_io_api_core_v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentTriggerImageChangeParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentTriggerImageChangeParams{`, + `Automatic:` + fmt.Sprintf("%v", this.Automatic) + `,`, + `ContainerNames:` + fmt.Sprintf("%v", this.ContainerNames) + `,`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `LastTriggeredImage:` + fmt.Sprintf("%v", this.LastTriggeredImage) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentTriggerPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentTriggerPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ImageChangeParams:` + strings.Replace(fmt.Sprintf("%v", this.ImageChangeParams), "DeploymentTriggerImageChangeParams", "DeploymentTriggerImageChangeParams", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExecNewPodHook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecNewPodHook{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `}`, + }, "") + return s +} +func (this *LifecycleHook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LifecycleHook{`, + `FailurePolicy:` + fmt.Sprintf("%v", this.FailurePolicy) + `,`, + `ExecNewPod:` + strings.Replace(fmt.Sprintf("%v", this.ExecNewPod), "ExecNewPodHook", "ExecNewPodHook", 1) + `,`, + `TagImages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.TagImages), "TagImageHook", "TagImageHook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RecreateDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RecreateDeploymentStrategyParams{`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `Pre:` + strings.Replace(fmt.Sprintf("%v", this.Pre), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Mid:` + strings.Replace(fmt.Sprintf("%v", this.Mid), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(fmt.Sprintf("%v", this.Post), "LifecycleHook", "LifecycleHook", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingDeploymentStrategyParams{`, + `UpdatePeriodSeconds:` + valueToStringGenerated(this.UpdatePeriodSeconds) + `,`, + `IntervalSeconds:` + valueToStringGenerated(this.IntervalSeconds) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `Pre:` + strings.Replace(fmt.Sprintf("%v", this.Pre), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(fmt.Sprintf("%v", this.Post), "LifecycleHook", "LifecycleHook", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TagImageHook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagImageHook{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `To:` + strings.Replace(strings.Replace(this.To.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Environment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Environment = append(m.Environment, k8s_io_api_core_v1.EnvVar{}) + if err := m.Environment[len(m.Environment)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentTriggerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageTrigger", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.ImageTrigger == nil { + m.ImageTrigger = &DeploymentCauseImageTrigger{} + } + if err := m.ImageTrigger.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCauseImageTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCauseImageTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfig) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: 
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DeploymentConfig{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigRollback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigRollback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.UpdatedAnnotations[mapkey] = mapvalue + } else { + var mapvalue string + m.UpdatedAnnotations[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigRollbackSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
DeploymentConfigRollbackSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeTriggers", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeTriggers = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeTemplate", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeTemplate = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeReplicationMeta", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeReplicationMeta = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeStrategy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeStrategy = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: DeploymentConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Triggers == nil { + m.Triggers = DeploymentTriggerPolicies{} + } + if err := m.Triggers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Test", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Test = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Selector == nil { + m.Selector = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &k8s_io_api_core_v1.PodTemplateSpec{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentConfigStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestVersion", wireType) + } + m.LatestVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LatestVersion |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Details == nil { + m.Details = &DeploymentDetails{} + } + if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Causes = append(m.Causes, DeploymentCause{}) + if err := 
m.Causes[len(m.Causes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentLog) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentLog: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentLog: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentLogOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
m.Previous = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SinceSeconds = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SinceTime == nil { + m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamps = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TailLines = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoWait = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Version = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Latest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Latest = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTriggers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeTriggers = append(m.ExcludeTriggers, DeploymentTriggerType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CustomParams == nil { + m.CustomParams = &CustomDeploymentStrategyParams{} + } + if err := m.CustomParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RecreateParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RecreateParams == nil { + m.RecreateParams = &RecreateDeploymentStrategyParams{} + } + if err := m.RecreateParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingParams == nil { + m.RollingParams = &RollingDeploymentStrategyParams{} + } + if err := m.RollingParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Annotations[mapkey] = mapvalue + } else { + var mapvalue string + m.Annotations[mapkey] = mapvalue + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentTriggerImageChangeParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentTriggerImageChangeParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Automatic", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Automatic = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerNames = append(m.ContainerNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTriggeredImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentTriggerPolicies: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentTriggerPolicies: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, DeploymentTriggerPolicy{}) + if err := (*m)[len(*m)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { 
+ return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentTriggerPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentTriggerPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentTriggerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageChangeParams == nil { + m.ImageChangeParams = &DeploymentTriggerImageChangeParams{} + } + if err := m.ImageChangeParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecNewPodHook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecNewPodHook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, k8s_io_api_core_v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LifecycleHook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LifecycleHook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LifecycleHook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailurePolicy = LifecycleHookFailurePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecNewPod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExecNewPod == nil { + m.ExecNewPod = &ExecNewPodHook{} + } + if err := m.ExecNewPod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TagImages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TagImages = append(m.TagImages, TagImageHook{}) + if err := m.TagImages[len(m.TagImages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RecreateDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RecreateDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pre", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pre == nil { + m.Pre = &LifecycleHook{} + } + if err := m.Pre.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Mid == nil { + m.Mid = &LifecycleHook{} + } + if err := m.Mid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Post", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Post == nil { + m.Post = &LifecycleHook{} + } + if err := m.Post.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.UpdatePeriodSeconds = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntervalSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IntervalSeconds = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pre", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pre == nil { + m.Pre = &LifecycleHook{} + } + if err := m.Pre.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Post", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Post == nil { + m.Post = &LifecycleHook{} + } + if err := m.Post.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + 
} + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagImageHook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagImageHook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagImageHook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int 
= iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/openshift/api/apps/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2517 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xcb, 0x6f, 0x1c, 0x49, + 0x19, 0x77, 0x7b, 0x66, 0xec, 0x99, 0xcf, 0xaf, 0xb8, 0x9c, 0xc7, 0xac, 0x17, 0xd9, 0xd6, 0xac, + 0x36, 0x18, 0x58, 0x66, 0x12, 0x27, 0xac, 0xf2, 0xd0, 0x2e, 0x78, 0x1c, 0x67, 0xd7, 0xd1, 0x38, + 0x31, 0x65, 0x27, 0x21, 0x11, 0x82, 0x94, 0x7b, 0xca, 0xe3, 0x5a, 0x77, 0x77, 0x0d, 0xdd, 0x35, + 0x93, 0x0c, 0x42, 0x68, 0x2f, 0x20, 0x21, 0xed, 0x81, 0x23, 0x5c, 0x10, 0x07, 0xae, 0x20, 0x0e, + 0xdc, 0x11, 0x07, 0xa4, 0x1c, 0x40, 0x5a, 0x09, 0x09, 0x56, 0x08, 0x59, 0x1b, 0x73, 0xe3, 0x4f, + 0xc8, 0x09, 0xd5, 0xa3, 0x5f, 0xf3, 0x88, 0x3d, 0x4e, 0x6e, 0xd3, 0xdf, 0xe3, 0xf7, 0x55, 0x7d, + 0xaf, 0xfa, 0xaa, 0x06, 0x2e, 0x35, 0x98, 0xd8, 0x6f, 0xed, 0x96, 0x6d, 0xee, 0x56, 0x78, 0x93, + 0x7a, 0xc1, 0x3e, 0xdb, 0x13, 0x15, 0xd2, 0x64, 0x15, 0xd2, 0x6c, 0x06, 0x95, 0xf6, 0xe5, 0x4a, + 0x83, 0x7a, 0xd4, 0x27, 0x82, 0xd6, 0xcb, 0x4d, 0x9f, 0x0b, 0x8e, 0x96, 0x62, 0x8d, 0x72, 0xa4, + 0x51, 0x26, 0x4d, 0x56, 0x96, 0x1a, 0xe5, 0xf6, 0xe5, 0xf9, 0x6f, 0x26, 0x30, 0x1b, 0xbc, 0xc1, + 0x2b, 0x4a, 0x71, 0xb7, 0xb5, 0xa7, 0xbe, 0xd4, 0x87, 0xfa, 0xa5, 0x01, 0xe7, 0x4b, 0x07, 0xd7, + 0x82, 0x32, 0xe3, 0xca, 0xa8, 0xcd, 0x7d, 0xda, 0xc7, 0xe8, 0xfc, 0xd5, 0x58, 0xc6, 0x25, 0xf6, + 0x3e, 0xf3, 0xa8, 0xdf, 0xa9, 0x34, 0x0f, 0x1a, 0x92, 0x10, 0x54, 0x5c, 0x2a, 0x48, 0x3f, 0xad, + 0xca, 0x20, 0x2d, 0xbf, 0xe5, 0x09, 0xe6, 0xd2, 0x1e, 0x85, 0xf7, 0x8f, 0x53, 0x08, 0xec, 0x7d, + 0xea, 0x92, 0x1e, 0xbd, 0x2b, 0x83, 0xf4, 0x5a, 0x82, 0x39, 0x15, 0xe6, 0x89, 0x40, 0xf8, 0xdd, + 0x4a, 0xa5, 0x3f, 0x5b, 0xb0, 0xb0, 0xd6, 0x0a, 0x04, 0x77, 0x6f, 0xd1, 0xa6, 0xc3, 0x3b, 0x2e, + 0xf5, 0xc4, 0xb6, 0x90, 0x12, 0x8d, 0xce, 0x16, 0xf1, 0x89, 0x1b, 0xa0, 0x77, 0x20, 0xc7, 0x5c, + 0xd2, 0xa0, 0x45, 0x6b, 0xc9, 0x5a, 0x2e, 0x54, 0xa7, 0x9e, 0x1f, 0x2e, 0x8e, 0x1c, 0x1d, 0x2e, + 0xe6, 0x36, 0x24, 0x11, 0x6b, 0x1e, 0xfa, 0x2e, 0x4c, 0x50, 0xaf, 0xcd, 0x7c, 0xee, 0x49, 0x84, + 0xe2, 0xe8, 0x52, 0x66, 0x79, 0x62, 0x65, 0xbe, 0xac, 0x97, 0xa4, 0x02, 0x23, 0xbd, 0x5a, 0x6e, + 0x5f, 0x2e, 0xaf, 0x7b, 0xed, 0x07, 0xc4, 0xaf, 0xce, 0x19, 0x98, 0x89, 0xf5, 0x58, 0x0d, 0x27, + 0x31, 0xd0, 0xbb, 0x30, 0x6e, 0x73, 0xd7, 0x25, 0x5e, 0xbd, 0x98, 0x59, 0xca, 0x2c, 0x17, 0xaa, + 0x13, 0x47, 0x87, 0x8b, 0xe3, 0x6b, 0x9a, 0x84, 0x43, 0x5e, 0xe9, 0x2f, 0x16, 0xcc, 0xc4, 0x6b, + 0x5f, 0x23, 0xad, 0x80, 0xa2, 0xeb, 0x90, 0x15, 0x9d, 0x66, 0xb8, 0xe2, 0x77, 0x8d, 0xa9, 0xec, + 0x4e, 0xa7, 0x49, 0x5f, 0x1e, 0x2e, 0x9e, 0x8b, 0xc5, 0x77, 0x7c, 0xd6, 
0x68, 0x50, 0x5f, 0x32, + 0xb0, 0x52, 0x41, 0x01, 0x4c, 0xaa, 0x1d, 0x19, 0x4e, 0x71, 0x74, 0xc9, 0x5a, 0x9e, 0x58, 0xf9, + 0xa0, 0x7c, 0x5c, 0xc2, 0x95, 0xbb, 0xd6, 0xb0, 0x91, 0x00, 0xa9, 0x9e, 0x39, 0x3a, 0x5c, 0x9c, + 0x4c, 0x52, 0x70, 0xca, 0x48, 0xa9, 0x0e, 0x6f, 0xbf, 0x42, 0x1d, 0xad, 0x43, 0x76, 0xcf, 0xe7, + 0xae, 0xda, 0xce, 0xc4, 0xca, 0x3b, 0xfd, 0xbc, 0x7a, 0x6f, 0xf7, 0x13, 0x6a, 0x0b, 0x4c, 0xf7, + 0xa8, 0x4f, 0x3d, 0x9b, 0x56, 0x27, 0xc3, 0x3d, 0xdf, 0xf6, 0xb9, 0x8b, 0x95, 0x7a, 0xe9, 0x5f, + 0x19, 0x98, 0x4b, 0x98, 0xe1, 0x5e, 0x9d, 0x09, 0xc6, 0x3d, 0x74, 0x33, 0xe5, 0xad, 0xaf, 0x76, + 0x79, 0xeb, 0x42, 0x1f, 0x95, 0x84, 0xbf, 0x6a, 0x30, 0x16, 0x08, 0x22, 0x5a, 0x81, 0xf2, 0x54, + 0xa1, 0x7a, 0xd5, 0xa8, 0x8f, 0x6d, 0x2b, 0xea, 0xcb, 0xc3, 0xc5, 0x3e, 0xa5, 0x55, 0x8e, 0x90, + 0xb4, 0x14, 0x36, 0x18, 0xa8, 0x0d, 0xc8, 0x21, 0x81, 0xd8, 0xf1, 0x89, 0x17, 0x68, 0x4b, 0xcc, + 0xa5, 0xc5, 0x8c, 0xda, 0xf7, 0xd7, 0x13, 0xfb, 0x8e, 0x12, 0xbc, 0xdc, 0x3c, 0x68, 0x48, 0x42, + 0x50, 0x96, 0xf5, 0x27, 0x3d, 0x21, 0x35, 0xaa, 0xf3, 0x66, 0x15, 0xa8, 0xd6, 0x83, 0x86, 0xfb, + 0x58, 0x40, 0x17, 0x61, 0xcc, 0xa7, 0x24, 0xe0, 0x5e, 0x31, 0xab, 0x76, 0x31, 0x1d, 0xee, 0x02, + 0x2b, 0x2a, 0x36, 0x5c, 0xf4, 0x35, 0x18, 0x77, 0x69, 0x10, 0xc8, 0x6a, 0xc8, 0x29, 0xc1, 0x19, + 0x23, 0x38, 0xbe, 0xa9, 0xc9, 0x38, 0xe4, 0xa3, 0x4f, 0x60, 0x5a, 0x1a, 0xba, 0xdf, 0xac, 0x13, + 0x41, 0xd5, 0x36, 0xc6, 0x86, 0xde, 0xc6, 0x79, 0x83, 0x3e, 0x5d, 0x4b, 0x21, 0xe1, 0x2e, 0xe4, + 0xd2, 0x1f, 0x47, 0xe1, 0x4c, 0x2a, 0x4c, 0x7b, 0xac, 0x81, 0x9e, 0x40, 0x5e, 0x82, 0xd5, 0x89, + 0x20, 0x26, 0x73, 0x2e, 0x9d, 0xcc, 0xb4, 0xce, 0xa5, 0x4d, 0x2a, 0x48, 0x15, 0x99, 0x05, 0x40, + 0x4c, 0xc3, 0x11, 0x2a, 0xfa, 0x1e, 0x64, 0x83, 0x26, 0xb5, 0x4d, 0x8d, 0xbc, 0x3f, 0x54, 0x8d, + 0xa8, 0x35, 0x6e, 0x37, 0xa9, 0x1d, 0xa7, 0xaa, 0xfc, 0xc2, 0x0a, 0x11, 0x3d, 0x89, 0xb2, 0x4a, + 0xc7, 0xfe, 0xda, 0x29, 0xb0, 0x95, 0x7e, 0x1c, 0xc9, 0x74, 0xa6, 0x95, 0xfe, 0x6e, 0xc1, 0xd9, + 0x6e, 0x95, 0x1a, 0x0b, 0x04, 0xfa, 0x7e, 0x8f, 0xdb, 0xca, 0x27, 0x73, 0x9b, 0xd4, 0x56, 0x4e, + 0x3b, 0x63, 0x4c, 0xe6, 0x43, 0x4a, 0xc2, 0x65, 0x0f, 0x21, 0xc7, 0x04, 0x75, 0x03, 0xd3, 0x21, + 0x57, 0x86, 0xdf, 0x57, 0xa2, 0x01, 0x4b, 0x20, 0xac, 0xf1, 0x4a, 0x3f, 0xcf, 0x40, 0xb1, 0x5b, + 0x14, 0x73, 0xc7, 0xd9, 0x25, 0xf6, 0x01, 0x5a, 0x82, 0xac, 0x47, 0xdc, 0xb0, 0xc2, 0x23, 0x87, + 0xdf, 0x25, 0x2e, 0xc5, 0x8a, 0x83, 0x7e, 0x63, 0x01, 0x6a, 0xa9, 0x84, 0xaa, 0xaf, 0x7a, 0x1e, + 0x17, 0x44, 0x96, 0x46, 0xb8, 0x4a, 0x3c, 0xfc, 0x2a, 0x43, 0xd3, 0xe5, 0xfb, 0x3d, 0xa0, 0xeb, + 0x9e, 0xf0, 0x3b, 0x71, 0x85, 0xf6, 0x0a, 0xe0, 0x3e, 0x2b, 0x41, 0x4f, 0x4c, 0xae, 0xe9, 0x7c, + 0xf8, 0xf0, 0xf4, 0x2b, 0x1a, 0x94, 0x73, 0xf3, 0xeb, 0x70, 0x61, 0xc0, 0x62, 0xd1, 0x19, 0xc8, + 0x1c, 0xd0, 0x8e, 0x76, 0x1f, 0x96, 0x3f, 0xd1, 0x59, 0xc8, 0xb5, 0x89, 0xd3, 0xa2, 0xba, 0xeb, + 0x61, 0xfd, 0x71, 0x63, 0xf4, 0x9a, 0x55, 0xfa, 0x53, 0x06, 0xbe, 0xf2, 0x2a, 0xdb, 0x6f, 0xa8, + 0x9b, 0xa3, 0xf7, 0x20, 0xef, 0xd3, 0x36, 0x0b, 0x18, 0xf7, 0xd4, 0x22, 0x32, 0x71, 0xde, 0x61, + 0x43, 0xc7, 0x91, 0x04, 0x5a, 0x85, 0x19, 0xe6, 0xd9, 0x4e, 0xab, 0x1e, 0x1e, 0x2a, 0xba, 0xb2, + 0xf2, 0xd5, 0x0b, 0x46, 0x69, 0x66, 0x23, 0xcd, 0xc6, 0xdd, 0xf2, 0x49, 0x08, 0xea, 0x36, 0x1d, + 0x22, 0xa8, 0x6a, 0x96, 0x7d, 0x20, 0x0c, 0x1b, 0x77, 0xcb, 0xa3, 0x07, 0x70, 0xde, 0x90, 0x30, + 0x6d, 0x3a, 0xcc, 0x56, 0x3e, 0x96, 0x15, 0xa2, 0xba, 0x69, 0xbe, 0xba, 0x60, 0x90, 0xce, 0x6f, + 0xf4, 0x95, 0xc2, 0x03, 0xb4, 0x13, 0x4b, 0x0b, 0x67, 0x17, 0xd5, 0x6c, 0x7b, 0x97, 0x16, 0xb2, + 
0x71, 0xb7, 0x7c, 0xe9, 0x7f, 0xb9, 0xde, 0x7e, 0xa0, 0xc2, 0xb5, 0x0b, 0xf9, 0x20, 0x04, 0xd5, + 0x21, 0xbb, 0x3a, 0x4c, 0xf2, 0x85, 0x06, 0xe2, 0xe8, 0x44, 0x6b, 0x88, 0x70, 0x11, 0x85, 0xbc, + 0x08, 0xc3, 0xa2, 0x9b, 0xe9, 0xcd, 0x61, 0x6c, 0x98, 0x10, 0x6d, 0x71, 0x87, 0xd9, 0x8c, 0x06, + 0xd5, 0x49, 0x69, 0x26, 0x0a, 0x64, 0x04, 0xad, 0x53, 0x46, 0x79, 0x4e, 0x47, 0x3f, 0x97, 0x4c, + 0x19, 0x4d, 0xc7, 0x91, 0x04, 0xaa, 0xc1, 0xd9, 0x30, 0x7d, 0x3e, 0x66, 0x81, 0xe0, 0x7e, 0xa7, + 0xc6, 0x5c, 0x26, 0x54, 0xd0, 0x73, 0xd5, 0xe2, 0xd1, 0xe1, 0xe2, 0x59, 0xdc, 0x87, 0x8f, 0xfb, + 0x6a, 0xc9, 0x16, 0x24, 0x68, 0x20, 0x4c, 0xa0, 0xa3, 0x84, 0xde, 0xa1, 0x81, 0xc0, 0x8a, 0x23, + 0xcf, 0xe0, 0xa6, 0x1c, 0x7d, 0xea, 0x26, 0x76, 0x51, 0xe7, 0xde, 0x52, 0x54, 0x6c, 0xb8, 0xc8, + 0x87, 0x7c, 0x40, 0x1d, 0x6a, 0x0b, 0xee, 0x17, 0xc7, 0x55, 0x7f, 0xba, 0x75, 0xba, 0x93, 0xa7, + 0xbc, 0x6d, 0x60, 0x74, 0x47, 0x8a, 0x03, 0x64, 0xc8, 0x38, 0xb2, 0x83, 0x36, 0x21, 0x2f, 0xc2, + 0xa4, 0xcf, 0x0f, 0xae, 0xdb, 0x2d, 0x5e, 0x0f, 0x73, 0x5d, 0xb7, 0x19, 0x15, 0x88, 0xb0, 0x1c, + 0x22, 0x08, 0x99, 0xaf, 0x2e, 0xf3, 0x30, 0x25, 0xf5, 0xce, 0x36, 0xb5, 0xb9, 0x57, 0x0f, 0x8a, + 0x05, 0xe5, 0xd5, 0x28, 0x5f, 0x37, 0xd3, 0x6c, 0xdc, 0x2d, 0x3f, 0x7f, 0x13, 0xa6, 0x52, 0xcb, + 0x1f, 0xaa, 0x47, 0xfd, 0x21, 0x07, 0xe7, 0xfb, 0x9f, 0x97, 0xe8, 0x26, 0x4c, 0xc9, 0x25, 0x06, + 0xe2, 0x01, 0xf5, 0x55, 0x6f, 0xb1, 0x54, 0x6f, 0x39, 0x67, 0x16, 0x36, 0x55, 0x4b, 0x32, 0x71, + 0x5a, 0x16, 0xdd, 0x01, 0xc4, 0x77, 0x03, 0xea, 0xb7, 0x69, 0xfd, 0x23, 0x7d, 0xd1, 0x88, 0xbb, + 0x53, 0xd4, 0xf0, 0xef, 0xf5, 0x48, 0xe0, 0x3e, 0x5a, 0x43, 0x26, 0xeb, 0x2a, 0xcc, 0x98, 0x43, + 0x23, 0x64, 0x9a, 0x3c, 0x8d, 0x3c, 0x7a, 0x3f, 0xcd, 0xc6, 0xdd, 0xf2, 0xe8, 0x23, 0x98, 0x25, + 0x6d, 0xc2, 0x1c, 0xb2, 0xeb, 0xd0, 0x08, 0x24, 0xa7, 0x40, 0xde, 0x32, 0x20, 0xb3, 0xab, 0xdd, + 0x02, 0xb8, 0x57, 0x07, 0x6d, 0xc2, 0x5c, 0xcb, 0xeb, 0x85, 0x1a, 0x53, 0x50, 0x6f, 0x1b, 0xa8, + 0xb9, 0xfb, 0xbd, 0x22, 0xb8, 0x9f, 0x1e, 0x7a, 0x0c, 0xe3, 0x75, 0x2a, 0x08, 0x73, 0x82, 0xe2, + 0xb8, 0x4a, 0xbd, 0x2b, 0xc3, 0xa4, 0xfb, 0x2d, 0xad, 0xaa, 0x2f, 0x4f, 0xe6, 0x03, 0x87, 0x80, + 0x88, 0x01, 0xd8, 0xe1, 0x28, 0x1e, 0x14, 0xf3, 0xaa, 0x9a, 0xbe, 0x35, 0x64, 0x35, 0x69, 0xed, + 0x78, 0x54, 0x8c, 0x48, 0x01, 0x4e, 0x80, 0xcb, 0xc4, 0xf2, 0x65, 0x02, 0x47, 0xfe, 0xd0, 0x19, + 0x1f, 0x25, 0x16, 0x4e, 0x32, 0x71, 0x5a, 0xb6, 0xf4, 0x6b, 0x0b, 0x66, 0x7b, 0xf6, 0x94, 0x9c, + 0xc6, 0xad, 0x63, 0xa6, 0xf1, 0x47, 0x30, 0x66, 0xcb, 0xf6, 0x11, 0x8e, 0x34, 0x97, 0x87, 0xbe, + 0xd0, 0xc5, 0xfd, 0x48, 0x7d, 0x06, 0xd8, 0x00, 0x96, 0x66, 0x60, 0x2a, 0x16, 0xad, 0xf1, 0x46, + 0xe9, 0xb3, 0x6c, 0xf2, 0x28, 0xa9, 0xf1, 0xc6, 0xbd, 0xa6, 0x76, 0x41, 0x05, 0x0a, 0x36, 0xf7, + 0x04, 0x91, 0x03, 0xa4, 0x59, 0xf1, 0xac, 0x01, 0x2d, 0xac, 0x85, 0x0c, 0x1c, 0xcb, 0xc8, 0x96, + 0xb8, 0xc7, 0x1d, 0x87, 0x3f, 0x55, 0x35, 0x94, 0x68, 0x89, 0xb7, 0x15, 0x15, 0x1b, 0xae, 0xac, + 0x95, 0xa6, 0xec, 0xba, 0xbc, 0x15, 0x1e, 0xeb, 0x51, 0xad, 0x6c, 0x19, 0x3a, 0x8e, 0x24, 0xd0, + 0x55, 0x98, 0x0c, 0x98, 0x67, 0xd3, 0xb0, 0xf5, 0x64, 0xf5, 0xf4, 0x20, 0xef, 0xa8, 0xdb, 0x09, + 0x3a, 0x4e, 0x49, 0xa1, 0x87, 0x50, 0x50, 0xdf, 0xea, 0x2a, 0x93, 0x1b, 0xfa, 0x2a, 0x33, 0x25, + 0x37, 0xb9, 0x1d, 0x02, 0xe0, 0x18, 0x0b, 0xad, 0x00, 0x08, 0xe6, 0xd2, 0x40, 0x10, 0xb7, 0x19, + 0x98, 0xde, 0x1f, 0x25, 0xd3, 0x4e, 0xc4, 0xc1, 0x09, 0x29, 0xf4, 0x0d, 0x28, 0xc8, 0x14, 0xa8, + 0x31, 0x8f, 0xea, 0xaa, 0xc8, 0x68, 0x03, 0x3b, 0x21, 0x11, 0xc7, 0x7c, 0x54, 0x06, 0x70, 0xe4, + 0x19, 0x54, 0xed, 0x08, 
0x1a, 0xa8, 0xf6, 0x9d, 0xa9, 0x4e, 0x4b, 0xf0, 0x5a, 0x44, 0xc5, 0x09, + 0x09, 0xe9, 0x75, 0x8f, 0x3f, 0x25, 0x4c, 0xa8, 0x14, 0x4d, 0x78, 0xfd, 0x2e, 0x7f, 0x48, 0x98, + 0xc0, 0x86, 0x8b, 0xde, 0x85, 0xf1, 0xb6, 0x69, 0x92, 0xa0, 0x40, 0x55, 0x8d, 0x85, 0xad, 0x31, + 0xe4, 0x95, 0xfe, 0x9d, 0xca, 0x5d, 0x4c, 0x7f, 0xd4, 0x92, 0xa7, 0xdd, 0xf1, 0x23, 0xf9, 0x45, + 0x18, 0xd3, 0xdd, 0xb5, 0x3b, 0xf8, 0xba, 0x05, 0x63, 0xc3, 0x45, 0xef, 0x40, 0x6e, 0x8f, 0xfb, + 0x36, 0x35, 0x91, 0x8f, 0xae, 0x07, 0xb7, 0x25, 0x11, 0x6b, 0x1e, 0x7a, 0x00, 0x33, 0xf4, 0x59, + 0x7a, 0xfe, 0xcb, 0xaa, 0x47, 0x95, 0xf7, 0x64, 0x6f, 0x5c, 0x4f, 0xb3, 0x06, 0xbf, 0x91, 0x74, + 0x83, 0x94, 0xfe, 0x31, 0x0e, 0xa8, 0x77, 0xd8, 0x41, 0x37, 0x52, 0x4f, 0x0a, 0x17, 0xbb, 0x9e, + 0x14, 0xce, 0xf7, 0x6a, 0x24, 0x5e, 0x14, 0xda, 0x30, 0x69, 0xab, 0x17, 0x29, 0xfd, 0xfe, 0x64, + 0x06, 0xa2, 0xef, 0x1c, 0x5f, 0xb0, 0xaf, 0x7e, 0xc7, 0xd2, 0x09, 0xbe, 0x96, 0x40, 0xc6, 0x29, + 0x3b, 0xe8, 0xa7, 0x30, 0xed, 0x53, 0xdb, 0xa7, 0x44, 0x50, 0x63, 0x59, 0xdf, 0x35, 0xaa, 0xc7, + 0x5b, 0xc6, 0x46, 0x6f, 0xa0, 0x6d, 0x24, 0x2f, 0xf1, 0x38, 0x85, 0x8e, 0xbb, 0xac, 0xa1, 0x1f, + 0xc3, 0x94, 0xcf, 0x1d, 0x87, 0x79, 0x0d, 0x63, 0x3e, 0xab, 0xcc, 0xaf, 0x9e, 0xc0, 0xbc, 0x56, + 0x1b, 0x68, 0x7d, 0x56, 0xf5, 0xd7, 0x24, 0x36, 0x4e, 0x9b, 0x42, 0x8f, 0xa0, 0xe0, 0xd3, 0x80, + 0xb7, 0x7c, 0x9b, 0x06, 0xa6, 0xb8, 0x97, 0xfb, 0x0d, 0x38, 0xd8, 0x08, 0xc9, 0x2c, 0x66, 0x3e, + 0x95, 0xb6, 0x82, 0xb8, 0x87, 0x85, 0xdc, 0x00, 0xc7, 0x68, 0x68, 0x5f, 0xa6, 0xf1, 0x2e, 0x75, + 0x64, 0x69, 0x67, 0x4e, 0x16, 0xc8, 0xde, 0x8d, 0x94, 0x6b, 0x0a, 0x42, 0x0f, 0x6a, 0x89, 0x42, + 0x90, 0x44, 0x6c, 0xf0, 0xd1, 0x4f, 0x60, 0x82, 0x24, 0xee, 0xae, 0x7a, 0x36, 0x5c, 0x3f, 0x95, + 0xb9, 0x9e, 0xeb, 0x6a, 0xf4, 0x5c, 0x99, 0xbc, 0xa7, 0x26, 0xcd, 0xa1, 0x7b, 0x70, 0x8e, 0xd8, + 0x82, 0xb5, 0xe9, 0x2d, 0x4a, 0xea, 0x0e, 0xf3, 0xa2, 0xf6, 0xaa, 0x1b, 0xce, 0x5b, 0x47, 0x87, + 0x8b, 0xe7, 0x56, 0xfb, 0x09, 0xe0, 0xfe, 0x7a, 0xf3, 0xd7, 0x61, 0x22, 0xb1, 0xeb, 0x61, 0xe6, + 0xbb, 0xf9, 0x0f, 0xe1, 0xcc, 0x6b, 0xdd, 0x61, 0x7f, 0x37, 0x0a, 0xa5, 0x9e, 0x06, 0xa0, 0x9e, + 0x24, 0xd7, 0xf6, 0x89, 0xd7, 0x08, 0x33, 0xb6, 0x02, 0x05, 0xd2, 0x12, 0xdc, 0x25, 0x82, 0xd9, + 0x0a, 0x38, 0x1f, 0xe7, 0xc2, 0x6a, 0xc8, 0xc0, 0xb1, 0x0c, 0xba, 0x01, 0xd3, 0xd1, 0xe1, 0x26, + 0x3b, 0x9d, 0x3e, 0x8d, 0x0b, 0xba, 0x3c, 0xd6, 0x52, 0x1c, 0xdc, 0x25, 0x19, 0x5d, 0x9b, 0x33, + 0xaf, 0x77, 0x6d, 0xbe, 0x13, 0xbe, 0x30, 0xaa, 0x3d, 0xd1, 0xba, 0xda, 0x95, 0x79, 0xf5, 0xeb, + 0x7a, 0x35, 0x4c, 0x4a, 0xe0, 0x3e, 0x5a, 0xa5, 0x9f, 0x59, 0xf0, 0xd6, 0xc0, 0x5b, 0x18, 0xfa, + 0x41, 0xf8, 0xd4, 0x63, 0xa9, 0x44, 0xbc, 0x7e, 0xda, 0x1b, 0x5d, 0xa7, 0xff, 0x8b, 0xcf, 0x8d, + 0xfc, 0xaf, 0x7e, 0xbb, 0x38, 0xf2, 0xe9, 0x7f, 0x96, 0x46, 0x4a, 0x5f, 0x5a, 0x70, 0x61, 0x80, + 0xee, 0xeb, 0x3c, 0x85, 0xff, 0xc2, 0x82, 0x59, 0xd6, 0x1d, 0x74, 0xd3, 0x8e, 0x6f, 0x9d, 0x62, + 0x37, 0x3d, 0x09, 0x54, 0x3d, 0x27, 0x67, 0xea, 0x1e, 0x32, 0xee, 0xb5, 0x5a, 0xfa, 0xa7, 0x05, + 0xd3, 0xeb, 0xcf, 0xa8, 0x7d, 0x97, 0x3e, 0xdd, 0xe2, 0xf5, 0x8f, 0x39, 0x3f, 0x48, 0xfe, 0x3f, + 0x60, 0x0d, 0xfe, 0x7f, 0x00, 0x5d, 0x87, 0x0c, 0xf5, 0xda, 0x27, 0xf8, 0x47, 0x62, 0xc2, 0xf8, + 0x26, 0xb3, 0xee, 0xb5, 0xb1, 0xd4, 0x91, 0x23, 0x6b, 0x2a, 0x09, 0x55, 0xee, 0x15, 0xe2, 0x91, + 0x35, 0x95, 0xb1, 0x38, 0x2d, 0xab, 0xa6, 0x03, 0xee, 0xb4, 0x64, 0x92, 0x67, 0xe3, 0xe5, 0x3d, + 0xd0, 0x24, 0x1c, 0xf2, 0x4a, 0xbf, 0x1f, 0x85, 0xa9, 0x1a, 0xdb, 0xa3, 0x76, 0xc7, 0x76, 0xa8, + 0xda, 0xd7, 0x23, 0x98, 0xda, 0x23, 0xcc, 0x69, 
0xf9, 0x54, 0x87, 0xd0, 0x84, 0xee, 0x4a, 0x68, + 0xf5, 0x76, 0x92, 0xf9, 0xf2, 0x70, 0x71, 0x3e, 0xa5, 0x9e, 0xe2, 0xe2, 0x34, 0x12, 0x7a, 0x02, + 0x40, 0x23, 0x27, 0x9a, 0x48, 0x5e, 0x3a, 0x3e, 0x92, 0x69, 0xc7, 0xeb, 0xd9, 0x29, 0xa6, 0xe1, + 0x04, 0x26, 0xfa, 0xa1, 0x1c, 0xcc, 0x1a, 0x2a, 0xa4, 0x81, 0xfa, 0xdb, 0x66, 0x62, 0xa5, 0x7c, + 0xbc, 0x81, 0x1d, 0xa3, 0xa2, 0xe0, 0xa3, 0x16, 0x12, 0x52, 0xd5, 0x30, 0x67, 0x7e, 0x96, 0xfe, + 0x3a, 0x0a, 0x4b, 0xc7, 0x1d, 0xb7, 0xb2, 0xcf, 0xc8, 0x61, 0x91, 0xb7, 0x44, 0xd8, 0x84, 0xf5, + 0x2d, 0x56, 0xf5, 0x99, 0x9d, 0x14, 0x07, 0x77, 0x49, 0xa2, 0x3b, 0x90, 0x69, 0xfa, 0xd4, 0x38, + 0xa7, 0x72, 0xfc, 0xda, 0x53, 0xde, 0xaf, 0x8e, 0xcb, 0x04, 0xda, 0xf2, 0x29, 0x96, 0x20, 0x12, + 0xcb, 0x65, 0x75, 0xd3, 0xb2, 0x4e, 0x87, 0xb5, 0xc9, 0xea, 0x58, 0x82, 0xa0, 0x4d, 0xc8, 0x36, + 0x79, 0x20, 0xcc, 0x54, 0x30, 0x34, 0x58, 0x5e, 0x56, 0xfd, 0x16, 0x0f, 0x04, 0x56, 0x30, 0xa5, + 0xbf, 0x65, 0x61, 0xf1, 0x98, 0xb9, 0x01, 0x6d, 0xc0, 0x9c, 0xbe, 0x24, 0x6f, 0x51, 0x9f, 0xf1, + 0x7a, 0xda, 0x97, 0x17, 0xd4, 0x25, 0xb6, 0x97, 0x8d, 0xfb, 0xe9, 0xa0, 0x0f, 0x60, 0x86, 0x79, + 0x82, 0xfa, 0x6d, 0xe2, 0x84, 0x30, 0xfa, 0x59, 0x60, 0x4e, 0xbf, 0xce, 0xa5, 0x58, 0xb8, 0x5b, + 0xb6, 0x4f, 0x40, 0x33, 0x27, 0x0e, 0xa8, 0x03, 0xd3, 0x2e, 0x79, 0x96, 0xb8, 0x6e, 0x1b, 0x17, + 0x0e, 0xfe, 0x37, 0xa4, 0x25, 0x98, 0x53, 0xd6, 0x7f, 0x98, 0x96, 0x37, 0x3c, 0x71, 0xcf, 0xdf, + 0x16, 0x3e, 0xf3, 0x1a, 0xda, 0xda, 0x66, 0x0a, 0x0b, 0x77, 0x61, 0xa3, 0xc7, 0x90, 0x77, 0xc9, + 0xb3, 0xed, 0x96, 0xdf, 0x08, 0x6f, 0x49, 0xc3, 0xdb, 0x51, 0xcf, 0x46, 0x9b, 0x06, 0x05, 0x47, + 0x78, 0x61, 0x6a, 0x8e, 0xbf, 0x89, 0xd4, 0x0c, 0xd3, 0x29, 0xff, 0x66, 0xd2, 0xe9, 0x33, 0x0b, + 0x26, 0x93, 0x55, 0xdc, 0xdb, 0x3b, 0xad, 0x21, 0x7a, 0xe7, 0xb7, 0x61, 0x54, 0x70, 0x53, 0x82, + 0x27, 0x3a, 0xe9, 0xc1, 0xc0, 0x8e, 0xee, 0x70, 0x3c, 0x2a, 0x78, 0x75, 0xf9, 0xf9, 0x8b, 0x85, + 0x91, 0xcf, 0x5f, 0x2c, 0x8c, 0x7c, 0xf1, 0x62, 0x61, 0xe4, 0xd3, 0xa3, 0x05, 0xeb, 0xf9, 0xd1, + 0x82, 0xf5, 0xf9, 0xd1, 0x82, 0xf5, 0xc5, 0xd1, 0x82, 0xf5, 0xe5, 0xd1, 0x82, 0xf5, 0xcb, 0xff, + 0x2e, 0x8c, 0x3c, 0x1e, 0x6d, 0x5f, 0xfe, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xb6, 0x32, + 0x5f, 0x7c, 0x20, 0x00, 0x00, +} diff --git a/vendor/github.com/openshift/api/apps/v1/register.go b/vendor/github.com/openshift/api/apps/v1/register.go new file mode 100644 index 000000000..88e40b4d4 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/register.go @@ -0,0 +1,58 @@ +package v1 + +import ( + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + LegacyGroupName = "" + GroupName = "apps.openshift.io" +) + +var ( + SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "v1"} + + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme + + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. 
+func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &DeploymentConfig{}, + &DeploymentConfigList{}, + &DeploymentConfigRollback{}, + &DeploymentRequest{}, + &DeploymentLog{}, + &DeploymentLogOptions{}, + &extensionsv1beta1.Scale{}, + } + scheme.AddKnownTypes(LegacySchemeGroupVersion, types...) + return nil +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DeploymentConfig{}, + &DeploymentConfigList{}, + &DeploymentConfigRollback{}, + &DeploymentRequest{}, + &DeploymentLog{}, + &DeploymentLogOptions{}, + &extensionsv1beta1.Scale{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go new file mode 100644 index 000000000..4f70b3d40 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/types.go @@ -0,0 +1,494 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +genclient:method=Instantiate,verb=create,subresource=instantiate,input=DeploymentRequest +// +genclient:method=Rollback,verb=create,subresource=rollback,input=DeploymentConfigRollback +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Deployment Configs define the template for a pod and manages deploying new images or configuration changes. +// A single deployment configuration is usually analogous to a single micro-service. Can support many different +// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as +// well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller. +// +// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed. +// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment +// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment +// is triggered by any means. +type DeploymentConfig struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec represents a desired deployment state and how to deploy to it. + Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current deployment state. + Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentConfigSpec represents the desired state of the deployment. +type DeploymentConfigSpec struct { + // Strategy describes how a deployment is executed. + // +optional + Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"` + + // MinReadySeconds is the minimum number of seconds for which a newly created pod should + // be ready without any of its container crashing, for it to be considered available. 
+ // Defaults to 0 (pod will be considered available as soon as it is ready) + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` + + // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers + // are defined, a new deployment can only occur as a result of an explicit client update to the + // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger. + // +optional + Triggers DeploymentTriggerPolicies `json:"triggers" protobuf:"bytes,2,rep,name=triggers"` + + // Replicas is the number of desired replicas. + // +optional + Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + + // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. + // This field is a pointer to allow for differentiation between an explicit zero and not specified. + // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.) + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"` + + // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the + // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding + // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. + // +optional + Test bool `json:"test" protobuf:"varint,5,opt,name=test"` + + // Paused indicates that the deployment config is paused resulting in no new deployments on template + // changes or changes in the template caused by other triggers. + Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"` + + // Selector is a label query over pods that should match the Replicas count. + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + Template *corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"` +} + +// DeploymentStrategy describes how to perform a deployment. +type DeploymentStrategy struct { + // Type is the name of a deployment strategy. + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // CustomParams are the input to the Custom deployment strategy, and may also + // be specified for the Recreate and Rolling strategies to customize the execution + // process that runs the deployment. + CustomParams *CustomDeploymentStrategyParams `json:"customParams,omitempty" protobuf:"bytes,2,opt,name=customParams"` + // RecreateParams are the input to the Recreate deployment strategy. + RecreateParams *RecreateDeploymentStrategyParams `json:"recreateParams,omitempty" protobuf:"bytes,3,opt,name=recreateParams"` + // RollingParams are the input to the Rolling deployment strategy. + RollingParams *RollingDeploymentStrategyParams `json:"rollingParams,omitempty" protobuf:"bytes,4,opt,name=rollingParams"` + + // Resources contains resource requirements to execute the deployment and any hooks. 
+ Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"` + // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"` + // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,7,rep,name=annotations"` + + // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment + // config may be active on a node before the system actively tries to terminate them. + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=activeDeadlineSeconds"` +} + +// DeploymentStrategyType refers to a specific DeploymentStrategy implementation. +type DeploymentStrategyType string + +const ( + // DeploymentStrategyTypeRecreate is a simple strategy suitable as a default. + DeploymentStrategyTypeRecreate DeploymentStrategyType = "Recreate" + // DeploymentStrategyTypeCustom is a user defined strategy. + DeploymentStrategyTypeCustom DeploymentStrategyType = "Custom" + // DeploymentStrategyTypeRolling uses the Kubernetes RollingUpdater. + DeploymentStrategyTypeRolling DeploymentStrategyType = "Rolling" +) + +// CustomDeploymentStrategyParams are the input to the Custom deployment strategy. +type CustomDeploymentStrategyParams struct { + // Image specifies a Docker image which can carry out a deployment. + Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` + // Environment holds the environment which will be given to the container for Image. + Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"` + // Command is optional and overrides CMD in the container Image. + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` +} + +// RecreateDeploymentStrategyParams are the input to the Recreate deployment +// strategy. +type RecreateDeploymentStrategyParams struct { + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"` + // Pre is a lifecycle hook which is executed before the strategy manipulates + // the deployment. All LifecycleHookFailurePolicy values are supported. + Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,2,opt,name=pre"` + // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new + // pod is created. All LifecycleHookFailurePolicy values are supported. + Mid *LifecycleHook `json:"mid,omitempty" protobuf:"bytes,3,opt,name=mid"` + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. + Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,4,opt,name=post"` +} + +// RollingDeploymentStrategyParams are the input to the Rolling deployment +// strategy. +type RollingDeploymentStrategyParams struct { + // UpdatePeriodSeconds is the time to wait between individual pod updates. + // If the value is nil, a default will be used. + UpdatePeriodSeconds *int64 `json:"updatePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=updatePeriodSeconds"` + // IntervalSeconds is the time to wait between polling deployment status + // after update. 
If the value is nil, a default will be used. + IntervalSeconds *int64 `json:"intervalSeconds,omitempty" protobuf:"varint,2,opt,name=intervalSeconds"` + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` + // MaxUnavailable is the maximum number of pods that can be unavailable + // during the update. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of update (ex: 10%). Absolute + // number is calculated from percentage by rounding down. + // + // This cannot be 0 if MaxSurge is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old + // RC can be scaled down further, followed by scaling up the new RC, + // ensuring that at least 70% of original number of pods are available at + // all times during the update. + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,4,opt,name=maxUnavailable"` + // MaxSurge is the maximum number of pods that can be scheduled above the + // original number of pods. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // + // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been + // killed, new RC can be scaled up further, ensuring that total number of + // pods running at any time during the update is atmost 130% of original + // pods. + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,5,opt,name=maxSurge"` + // Pre is a lifecycle hook which is executed before the deployment process + // begins. All LifecycleHookFailurePolicy values are supported. + Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,7,opt,name=pre"` + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values + // are supported. + Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,8,opt,name=post"` +} + +// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. +type LifecycleHook struct { + // FailurePolicy specifies what action to take if the hook fails. + FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"` + + // ExecNewPod specifies the options for a lifecycle hook backed by a pod. + ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"` + + // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. + TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"` +} + +// LifecycleHookFailurePolicy describes possibles actions to take if a hook fails. +type LifecycleHookFailurePolicy string + +const ( + // LifecycleHookFailurePolicyRetry means retry the hook until it succeeds. + LifecycleHookFailurePolicyRetry LifecycleHookFailurePolicy = "Retry" + // LifecycleHookFailurePolicyAbort means abort the deployment. 
+ LifecycleHookFailurePolicyAbort LifecycleHookFailurePolicy = "Abort" + // LifecycleHookFailurePolicyIgnore means ignore failure and continue the deployment. + LifecycleHookFailurePolicyIgnore LifecycleHookFailurePolicy = "Ignore" +) + +// ExecNewPodHook is a hook implementation which runs a command in a new pod +// based on the specified container which is assumed to be part of the +// deployment template. +type ExecNewPodHook struct { + // Command is the action command and its arguments. + Command []string `json:"command" protobuf:"bytes,1,rep,name=command"` + // Env is a set of environment variables to supply to the hook pod's container. + Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` + // ContainerName is the name of a container in the deployment pod template + // whose Docker image will be used for the hook pod's container. + ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"` + // Volumes is a list of named volumes from the pod template which should be + // copied to the hook pod. Volumes names not found in pod spec are ignored. + // An empty list means no volumes will be copied. + Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` +} + +// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. +type TagImageHook struct { + // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single + // container this value will be defaulted to the name of that container. + ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"` + // To is the target ImageStreamTag to set the container's image onto. + To corev1.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"` +} + +// DeploymentTriggerPolicies is a list of policies where nil values and different from empty arrays. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type DeploymentTriggerPolicies []DeploymentTriggerPolicy + +func (t DeploymentTriggerPolicies) String() string { + return fmt.Sprintf("%v", []DeploymentTriggerPolicy(t)) +} + +// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. +type DeploymentTriggerPolicy struct { + // Type of the trigger + Type DeploymentTriggerType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"` + // ImageChangeParams represents the parameters for the ImageChange trigger. + ImageChangeParams *DeploymentTriggerImageChangeParams `json:"imageChangeParams,omitempty" protobuf:"bytes,2,opt,name=imageChangeParams"` +} + +// DeploymentTriggerType refers to a specific DeploymentTriggerPolicy implementation. +type DeploymentTriggerType string + +const ( + // DeploymentTriggerOnImageChange will create new deployments in response to updated tags from + // a Docker image repository. + DeploymentTriggerOnImageChange DeploymentTriggerType = "ImageChange" + // DeploymentTriggerOnConfigChange will create new deployments in response to changes to + // the ControllerTemplate of a DeploymentConfig. + DeploymentTriggerOnConfigChange DeploymentTriggerType = "ConfigChange" +) + +// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. +type DeploymentTriggerImageChangeParams struct { + // Automatic means that the detection of a new tag value should result in an image update + // inside the pod template. 
+ Automatic bool `json:"automatic,omitempty" protobuf:"varint,1,opt,name=automatic"` + // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. + // If multiple triggers point to the same containers, the resulting behavior is undefined. Future + // API versions will make this a validation error. If ContainerNames does not point to a valid container, + // the trigger will be ignored. Future API versions will make this a validation error. + ContainerNames []string `json:"containerNames,omitempty" protobuf:"bytes,2,rep,name=containerNames"` + // From is a reference to an image stream tag to watch for changes. From.Name is the only + // required subfield - if From.Namespace is blank, the namespace of the current deployment + // trigger will be used. + From corev1.ObjectReference `json:"from" protobuf:"bytes,3,opt,name=from"` + // LastTriggeredImage is the last image to be triggered. + LastTriggeredImage string `json:"lastTriggeredImage,omitempty" protobuf:"bytes,4,opt,name=lastTriggeredImage"` +} + +// DeploymentConfigStatus represents the current deployment state. +type DeploymentConfigStatus struct { + // LatestVersion is used to determine whether the current deployment associated with a deployment + // config is out of sync. + LatestVersion int64 `json:"latestVersion" protobuf:"varint,1,opt,name=latestVersion"` + // ObservedGeneration is the most recent generation observed by the deployment config controller. + ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,2,opt,name=observedGeneration"` + // Replicas is the total number of pods targeted by this deployment config. + Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config + // that have the desired template spec. + UpdatedReplicas int32 `json:"updatedReplicas" protobuf:"varint,4,opt,name=updatedReplicas"` + // AvailableReplicas is the total number of available pods targeted by this deployment config. + AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,5,opt,name=availableReplicas"` + // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. + UnavailableReplicas int32 `json:"unavailableReplicas" protobuf:"varint,6,opt,name=unavailableReplicas"` + // Details are the reasons for the update to this deployment config. + // This could be based on a change made by the user or caused by an automatic trigger + Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"` + // Conditions represents the latest available observations of a deployment config's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"` + // Total number of ready pods targeted by this deployment. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"` +} + +// DeploymentDetails captures information about the causes of a deployment. 
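The spec, strategy, and trigger types above compose in the usual Kubernetes style. A minimal sketch of constructing one of these objects in Go (illustrative only; the names frontend, demo, and example/frontend:latest are placeholders, and the import path assumes the vendored package added in this change):

package main

import (
	"fmt"

	appsv1 "github.com/openshift/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxUnavailable := intstr.FromString("25%")
	maxSurge := intstr.FromString("25%")

	dc := &appsv1.DeploymentConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "frontend", Namespace: "demo"}, // placeholder names
		Spec: appsv1.DeploymentConfigSpec{
			Replicas: 3,
			Selector: map[string]string{"app": "frontend"},
			Strategy: appsv1.DeploymentStrategy{
				Type: appsv1.DeploymentStrategyTypeRolling,
				RollingParams: &appsv1.RollingDeploymentStrategyParams{
					MaxUnavailable: &maxUnavailable, // at most 25% of pods unavailable during the rollout
					MaxSurge:       &maxSurge,       // at most 25% extra pods above the desired count
				},
			},
			// With only a ConfigChange trigger, a new rollout happens when the pod template changes.
			Triggers: appsv1.DeploymentTriggerPolicies{
				{Type: appsv1.DeploymentTriggerOnConfigChange},
			},
			Template: &corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "frontend", Image: "example/frontend:latest"}},
				},
			},
		},
	}

	fmt.Println(dc.Name, dc.Spec.Strategy.Type)
}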
+type DeploymentDetails struct { + // Message is the user specified change message, if this deployment was triggered manually by the user + Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"` + // Causes are extended data associated with all the causes for creating a new deployment + Causes []DeploymentCause `json:"causes" protobuf:"bytes,2,rep,name=causes"` +} + +// DeploymentCause captures information about a particular cause of a deployment. +type DeploymentCause struct { + // Type of the trigger that resulted in the creation of a new deployment + Type DeploymentTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"` + // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change + ImageTrigger *DeploymentCauseImageTrigger `json:"imageTrigger,omitempty" protobuf:"bytes,2,opt,name=imageTrigger"` +} + +// DeploymentCauseImageTrigger represents details about the cause of a deployment originating +// from an image change trigger +type DeploymentCauseImageTrigger struct { + // From is a reference to the changed object which triggered a deployment. The field may have + // the kinds DockerImage, ImageStreamTag, or ImageStreamImage. + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` +} + +type DeploymentConditionType string + +// These are valid conditions of a DeploymentConfig. +const ( + // DeploymentAvailable means the DeploymentConfig is available, ie. at least the minimum available + // replicas required (dc.spec.replicas in case the DeploymentConfig is of Recreate type, + // dc.spec.replicas - dc.spec.strategy.rollingParams.maxUnavailable in case it's Rolling) are up and + // running for at least dc.spec.minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // DeploymentProgressing is: + // * True: the DeploymentConfig has been successfully deployed or is amidst getting deployed. + // The two different states can be determined by looking at the Reason of the Condition. + // For example, a complete DC will have {Status: True, Reason: NewReplicationControllerAvailable} + // and a DC in the middle of a rollout {Status: True, Reason: ReplicationControllerUpdated}. + // TODO: Represent a successfully deployed DC by using something else for Status like Unknown? + // * False: the DeploymentConfig has failed to deploy its latest version. + // + // This condition is purely informational and depends on the dc.spec.strategy.*params.timeoutSeconds + // field, which is responsible for the time in seconds to wait for a rollout before deciding that + // no progress can be made, thus the rollout is aborted. + // + // Progress for a DeploymentConfig is considered when new pods scale up or old pods scale down. + DeploymentProgressing DeploymentConditionType = "Progressing" + // DeploymentReplicaFailure is added in a deployment config when one of its pods + // fails to be created or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment config at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. 
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // The last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentConfigList is a collection of deployment configs. +type DeploymentConfigList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of deployment configs + Items []DeploymentConfig `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentConfigRollback provides the input to rollback generation. +type DeploymentConfigRollback struct { + metav1.TypeMeta `json:",inline"` + // Name of the deployment config that will be rolled back. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // UpdatedAnnotations is a set of new annotations that will be added in the deployment config. + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` + // Spec defines the options to rollback generation. + Spec DeploymentConfigRollbackSpec `json:"spec" protobuf:"bytes,3,opt,name=spec"` +} + +// DeploymentConfigRollbackSpec represents the options for rollback generation. +type DeploymentConfigRollbackSpec struct { + // From points to a ReplicationController which is a deployment. + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + // Revision to rollback to. If set to 0, rollback to the last revision. + Revision int64 `json:"revision,omitempty" protobuf:"varint,2,opt,name=revision"` + // IncludeTriggers specifies whether to include config Triggers. + IncludeTriggers bool `json:"includeTriggers" protobuf:"varint,3,opt,name=includeTriggers"` + // IncludeTemplate specifies whether to include the PodTemplateSpec. + IncludeTemplate bool `json:"includeTemplate" protobuf:"varint,4,opt,name=includeTemplate"` + // IncludeReplicationMeta specifies whether to include the replica count and selector. + IncludeReplicationMeta bool `json:"includeReplicationMeta" protobuf:"varint,5,opt,name=includeReplicationMeta"` + // IncludeStrategy specifies whether to include the deployment Strategy. + IncludeStrategy bool `json:"includeStrategy" protobuf:"varint,6,opt,name=includeStrategy"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentRequest is a request to a deployment config for a new deployment. +type DeploymentRequest struct { + metav1.TypeMeta `json:",inline"` + // Name of the deployment config for requesting a new deployment. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Latest will update the deployment config with the latest state from all triggers. 
+ Latest bool `json:"latest" protobuf:"varint,2,opt,name=latest"` + // Force will try to force a new deployment to run. If the deployment config is paused, + // then setting this to true will return an Invalid error. + Force bool `json:"force" protobuf:"varint,3,opt,name=force"` + // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. + // This field overrides the triggers from latest and allows clients to control specific + // logic. This field is ignored if not specified. + ExcludeTriggers []DeploymentTriggerType `json:"excludeTriggers,omitempty" protobuf:"bytes,4,rep,name=excludeTriggers,casttype=DeploymentTriggerType"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentLog represents the logs for a deployment +type DeploymentLog struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentLogOptions is the REST options for a deployment log +type DeploymentLogOptions struct { + metav1.TypeMeta `json:",inline"` + + // The container for which to stream logs. Defaults to only container if there is one container in the pod. + Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` + // Follow if true indicates that the build log should be streamed until + // the build terminates. + Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` + // Return previous deployment logs. Defaults to false. + Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"` + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"` + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` + + // NoWait if true causes the call to return immediately even if the deployment + // is not available yet. Otherwise the server will wait until the deployment has started. + // TODO: Fix the tag to 'noWait' in v2 + NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"` + + // Version of the deployment for which to view logs. 
+ Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"` +} diff --git a/vendor/github.com/openshift/api/apps/v1/types_swagger_doc_generated.go b/vendor/github.com/openshift/api/apps/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..78fe5ddc0 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/types_swagger_doc_generated.go @@ -0,0 +1,284 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CustomDeploymentStrategyParams = map[string]string{ + "": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.", + "image": "Image specifies a Docker image which can carry out a deployment.", + "environment": "Environment holds the environment which will be given to the container for Image.", + "command": "Command is optional and overrides CMD in the container Image.", +} + +func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string { + return map_CustomDeploymentStrategyParams +} + +var map_DeploymentCause = map[string]string{ + "": "DeploymentCause captures information about a particular cause of a deployment.", + "type": "Type of the trigger that resulted in the creation of a new deployment", + "imageTrigger": "ImageTrigger contains the image trigger details, if this trigger was fired based on an image change", +} + +func (DeploymentCause) SwaggerDoc() map[string]string { + return map_DeploymentCause +} + +var map_DeploymentCauseImageTrigger = map[string]string{ + "": "DeploymentCauseImageTrigger represents details about the cause of a deployment originating from an image change trigger", + "from": "From is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.", +} + +func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string { + return map_DeploymentCauseImageTrigger +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment config at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentConfig = map[string]string{ + "": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. 
Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.", + "metadata": "Standard object's metadata.", + "spec": "Spec represents a desired deployment state and how to deploy to it.", + "status": "Status represents the current deployment state.", +} + +func (DeploymentConfig) SwaggerDoc() map[string]string { + return map_DeploymentConfig +} + +var map_DeploymentConfigList = map[string]string{ + "": "DeploymentConfigList is a collection of deployment configs.", + "metadata": "Standard object's metadata.", + "items": "Items is a list of deployment configs", +} + +func (DeploymentConfigList) SwaggerDoc() map[string]string { + return map_DeploymentConfigList +} + +var map_DeploymentConfigRollback = map[string]string{ + "": "DeploymentConfigRollback provides the input to rollback generation.", + "name": "Name of the deployment config that will be rolled back.", + "updatedAnnotations": "UpdatedAnnotations is a set of new annotations that will be added in the deployment config.", + "spec": "Spec defines the options to rollback generation.", +} + +func (DeploymentConfigRollback) SwaggerDoc() map[string]string { + return map_DeploymentConfigRollback +} + +var map_DeploymentConfigRollbackSpec = map[string]string{ + "": "DeploymentConfigRollbackSpec represents the options for rollback generation.", + "from": "From points to a ReplicationController which is a deployment.", + "revision": "Revision to rollback to. If set to 0, rollback to the last revision.", + "includeTriggers": "IncludeTriggers specifies whether to include config Triggers.", + "includeTemplate": "IncludeTemplate specifies whether to include the PodTemplateSpec.", + "includeReplicationMeta": "IncludeReplicationMeta specifies whether to include the replica count and selector.", + "includeStrategy": "IncludeStrategy specifies whether to include the deployment Strategy.", +} + +func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string { + return map_DeploymentConfigRollbackSpec +} + +var map_DeploymentConfigSpec = map[string]string{ + "": "DeploymentConfigSpec represents the desired state of the deployment.", + "strategy": "Strategy describes how a deployment is executed.", + "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "triggers": "Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.", + "replicas": "Replicas is the number of desired replicas.", + "revisionHistoryLimit": "RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. 
(This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)", + "test": "Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.", + "paused": "Paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.", + "selector": "Selector is a label query over pods that should match the Replicas count.", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected.", +} + +func (DeploymentConfigSpec) SwaggerDoc() map[string]string { + return map_DeploymentConfigSpec +} + +var map_DeploymentConfigStatus = map[string]string{ + "": "DeploymentConfigStatus represents the current deployment state.", + "latestVersion": "LatestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.", + "observedGeneration": "ObservedGeneration is the most recent generation observed by the deployment config controller.", + "replicas": "Replicas is the total number of pods targeted by this deployment config.", + "updatedReplicas": "UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.", + "availableReplicas": "AvailableReplicas is the total number of available pods targeted by this deployment config.", + "unavailableReplicas": "UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.", + "details": "Details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger", + "conditions": "Conditions represents the latest available observations of a deployment config's current state.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", +} + +func (DeploymentConfigStatus) SwaggerDoc() map[string]string { + return map_DeploymentConfigStatus +} + +var map_DeploymentDetails = map[string]string{ + "": "DeploymentDetails captures information about the causes of a deployment.", + "message": "Message is the user specified change message, if this deployment was triggered manually by the user", + "causes": "Causes are extended data associated with all the causes for creating a new deployment", +} + +func (DeploymentDetails) SwaggerDoc() map[string]string { + return map_DeploymentDetails +} + +var map_DeploymentLog = map[string]string{ + "": "DeploymentLog represents the logs for a deployment", +} + +func (DeploymentLog) SwaggerDoc() map[string]string { + return map_DeploymentLog +} + +var map_DeploymentLogOptions = map[string]string{ + "": "DeploymentLogOptions is the REST options for a deployment log", + "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", + "follow": "Follow if true indicates that the build log should be streamed until the build terminates.", + "previous": "Return previous deployment logs. Defaults to false.", + "sinceSeconds": "A relative time in seconds before the current time from which to show logs. 
If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "nowait": "NoWait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.", + "version": "Version of the deployment for which to view logs.", +} + +func (DeploymentLogOptions) SwaggerDoc() map[string]string { + return map_DeploymentLogOptions +} + +var map_DeploymentRequest = map[string]string{ + "": "DeploymentRequest is a request to a deployment config for a new deployment.", + "name": "Name of the deployment config for requesting a new deployment.", + "latest": "Latest will update the deployment config with the latest state from all triggers.", + "force": "Force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.", + "excludeTriggers": "ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. 
This field is ignored if not specified.", +} + +func (DeploymentRequest) SwaggerDoc() map[string]string { + return map_DeploymentRequest +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to perform a deployment.", + "type": "Type is the name of a deployment strategy.", + "customParams": "CustomParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.", + "recreateParams": "RecreateParams are the input to the Recreate deployment strategy.", + "rollingParams": "RollingParams are the input to the Rolling deployment strategy.", + "resources": "Resources contains resource requirements to execute the deployment and any hooks.", + "labels": "Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", + "annotations": "Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", + "activeDeadlineSeconds": "ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_DeploymentTriggerImageChangeParams = map[string]string{ + "": "DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.", + "automatic": "Automatic means that the detection of a new tag value should result in an image update inside the pod template.", + "containerNames": "ContainerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.", + "from": "From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.", + "lastTriggeredImage": "LastTriggeredImage is the last image to be triggered.", +} + +func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string { + return map_DeploymentTriggerImageChangeParams +} + +var map_DeploymentTriggerPolicy = map[string]string{ + "": "DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.", + "type": "Type of the trigger", + "imageChangeParams": "ImageChangeParams represents the parameters for the ImageChange trigger.", +} + +func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string { + return map_DeploymentTriggerPolicy +} + +var map_ExecNewPodHook = map[string]string{ + "": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.", + "command": "Command is the action command and its arguments.", + "env": "Env is a set of environment variables to supply to the hook pod's container.", + "containerName": "ContainerName is the name of a container in the deployment pod template whose Docker image will be used for the hook pod's container.", + "volumes": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. 
An empty list means no volumes will be copied.", +} + +func (ExecNewPodHook) SwaggerDoc() map[string]string { + return map_ExecNewPodHook +} + +var map_LifecycleHook = map[string]string{ + "": "LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.", + "failurePolicy": "FailurePolicy specifies what action to take if the hook fails.", + "execNewPod": "ExecNewPod specifies the options for a lifecycle hook backed by a pod.", + "tagImages": "TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.", +} + +func (LifecycleHook) SwaggerDoc() map[string]string { + return map_LifecycleHook +} + +var map_RecreateDeploymentStrategyParams = map[string]string{ + "": "RecreateDeploymentStrategyParams are the input to the Recreate deployment strategy.", + "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "pre": "Pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.", + "mid": "Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.", + "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", +} + +func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string { + return map_RecreateDeploymentStrategyParams +} + +var map_RollingDeploymentStrategyParams = map[string]string{ + "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", + "updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", + "intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", + "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "maxUnavailable": "MaxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.", + "maxSurge": "MaxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.", + "pre": "Pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.", + "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", +} + +func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string { + return map_RollingDeploymentStrategyParams +} + +var map_TagImageHook = map[string]string{ + "": "TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.", + "containerName": "ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.", + "to": "To is the target ImageStreamTag to set the container's image onto.", +} + +func (TagImageHook) SwaggerDoc() map[string]string { + return map_TagImageHook +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..383c2f516 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go @@ -0,0 +1,875 @@ +// +build !ignore_autogenerated + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + unsafe "unsafe" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDeploymentStrategyParams) DeepCopyInto(out *CustomDeploymentStrategyParams) { + *out = *in + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make([]core_v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDeploymentStrategyParams. +func (in *CustomDeploymentStrategyParams) DeepCopy() *CustomDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(CustomDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCause) DeepCopyInto(out *DeploymentCause) { + *out = *in + if in.ImageTrigger != nil { + in, out := &in.ImageTrigger, &out.ImageTrigger + if *in == nil { + *out = nil + } else { + *out = new(DeploymentCauseImageTrigger) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCause. +func (in *DeploymentCause) DeepCopy() *DeploymentCause { + if in == nil { + return nil + } + out := new(DeploymentCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentCauseImageTrigger) DeepCopyInto(out *DeploymentCauseImageTrigger) { + *out = *in + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCauseImageTrigger. +func (in *DeploymentCauseImageTrigger) DeepCopy() *DeploymentCauseImageTrigger { + if in == nil { + return nil + } + out := new(DeploymentCauseImageTrigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConditionType) DeepCopyInto(out *DeploymentConditionType) { + { + in := (*string)(unsafe.Pointer(in)) + out := (*string)(unsafe.Pointer(out)) + *out = *in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConditionType. +func (in *DeploymentConditionType) DeepCopy() *DeploymentConditionType { + if in == nil { + return nil + } + out := new(DeploymentConditionType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfig) DeepCopyInto(out *DeploymentConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfig. +func (in *DeploymentConfig) DeepCopy() *DeploymentConfig { + if in == nil { + return nil + } + out := new(DeploymentConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigList) DeepCopyInto(out *DeploymentConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeploymentConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigList. +func (in *DeploymentConfigList) DeepCopy() *DeploymentConfigList { + if in == nil { + return nil + } + out := new(DeploymentConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DeploymentConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigRollback) DeepCopyInto(out *DeploymentConfigRollback) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollback. +func (in *DeploymentConfigRollback) DeepCopy() *DeploymentConfigRollback { + if in == nil { + return nil + } + out := new(DeploymentConfigRollback) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentConfigRollback) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigRollbackSpec) DeepCopyInto(out *DeploymentConfigRollbackSpec) { + *out = *in + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollbackSpec. +func (in *DeploymentConfigRollbackSpec) DeepCopy() *DeploymentConfigRollbackSpec { + if in == nil { + return nil + } + out := new(DeploymentConfigRollbackSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigSpec) DeepCopyInto(out *DeploymentConfigSpec) { + *out = *in + in.Strategy.DeepCopyInto(&out.Strategy) + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make(DeploymentTriggerPolicies, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + if *in == nil { + *out = nil + } else { + *out = new(core_v1.PodTemplateSpec) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigSpec. +func (in *DeploymentConfigSpec) DeepCopy() *DeploymentConfigSpec { + if in == nil { + return nil + } + out := new(DeploymentConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentConfigStatus) DeepCopyInto(out *DeploymentConfigStatus) { + *out = *in + if in.Details != nil { + in, out := &in.Details, &out.Details + if *in == nil { + *out = nil + } else { + *out = new(DeploymentDetails) + (*in).DeepCopyInto(*out) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigStatus. +func (in *DeploymentConfigStatus) DeepCopy() *DeploymentConfigStatus { + if in == nil { + return nil + } + out := new(DeploymentConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentDetails) DeepCopyInto(out *DeploymentDetails) { + *out = *in + if in.Causes != nil { + in, out := &in.Causes, &out.Causes + *out = make([]DeploymentCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentDetails. +func (in *DeploymentDetails) DeepCopy() *DeploymentDetails { + if in == nil { + return nil + } + out := new(DeploymentDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentLog) DeepCopyInto(out *DeploymentLog) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLog. +func (in *DeploymentLog) DeepCopy() *DeploymentLog { + if in == nil { + return nil + } + out := new(DeploymentLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentLog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentLogOptions) DeepCopyInto(out *DeploymentLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.Time) + (*in).DeepCopyInto(*out) + } + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.Version != nil { + in, out := &in.Version, &out.Version + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLogOptions. 
+func (in *DeploymentLogOptions) DeepCopy() *DeploymentLogOptions { + if in == nil { + return nil + } + out := new(DeploymentLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentRequest) DeepCopyInto(out *DeploymentRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ExcludeTriggers != nil { + in, out := &in.ExcludeTriggers, &out.ExcludeTriggers + *out = make([]DeploymentTriggerType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRequest. +func (in *DeploymentRequest) DeepCopy() *DeploymentRequest { + if in == nil { + return nil + } + out := new(DeploymentRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.CustomParams != nil { + in, out := &in.CustomParams, &out.CustomParams + if *in == nil { + *out = nil + } else { + *out = new(CustomDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + } + if in.RecreateParams != nil { + in, out := &in.RecreateParams, &out.RecreateParams + if *in == nil { + *out = nil + } else { + *out = new(RecreateDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + } + if in.RollingParams != nil { + in, out := &in.RollingParams, &out.RollingParams + if *in == nil { + *out = nil + } else { + *out = new(RollingDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategyType) DeepCopyInto(out *DeploymentStrategyType) { + { + in := (*string)(unsafe.Pointer(in)) + out := (*string)(unsafe.Pointer(out)) + *out = *in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategyType. 
+func (in *DeploymentStrategyType) DeepCopy() *DeploymentStrategyType { + if in == nil { + return nil + } + out := new(DeploymentStrategyType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTriggerImageChangeParams) DeepCopyInto(out *DeploymentTriggerImageChangeParams) { + *out = *in + if in.ContainerNames != nil { + in, out := &in.ContainerNames, &out.ContainerNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerImageChangeParams. +func (in *DeploymentTriggerImageChangeParams) DeepCopy() *DeploymentTriggerImageChangeParams { + if in == nil { + return nil + } + out := new(DeploymentTriggerImageChangeParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTriggerPolicies) DeepCopyInto(out *DeploymentTriggerPolicies) { + { + in := (*[]DeploymentTriggerPolicy)(unsafe.Pointer(in)) + out := (*[]DeploymentTriggerPolicy)(unsafe.Pointer(out)) + *out = make([]DeploymentTriggerPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicies. +func (in *DeploymentTriggerPolicies) DeepCopy() *DeploymentTriggerPolicies { + if in == nil { + return nil + } + out := new(DeploymentTriggerPolicies) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTriggerPolicy) DeepCopyInto(out *DeploymentTriggerPolicy) { + *out = *in + if in.ImageChangeParams != nil { + in, out := &in.ImageChangeParams, &out.ImageChangeParams + if *in == nil { + *out = nil + } else { + *out = new(DeploymentTriggerImageChangeParams) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicy. +func (in *DeploymentTriggerPolicy) DeepCopy() *DeploymentTriggerPolicy { + if in == nil { + return nil + } + out := new(DeploymentTriggerPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTriggerType) DeepCopyInto(out *DeploymentTriggerType) { + { + in := (*string)(unsafe.Pointer(in)) + out := (*string)(unsafe.Pointer(out)) + *out = *in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerType. +func (in *DeploymentTriggerType) DeepCopy() *DeploymentTriggerType { + if in == nil { + return nil + } + out := new(DeploymentTriggerType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExecNewPodHook) DeepCopyInto(out *ExecNewPodHook) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]core_v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecNewPodHook. +func (in *ExecNewPodHook) DeepCopy() *ExecNewPodHook { + if in == nil { + return nil + } + out := new(ExecNewPodHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) { + *out = *in + if in.ExecNewPod != nil { + in, out := &in.ExecNewPod, &out.ExecNewPod + if *in == nil { + *out = nil + } else { + *out = new(ExecNewPodHook) + (*in).DeepCopyInto(*out) + } + } + if in.TagImages != nil { + in, out := &in.TagImages, &out.TagImages + *out = make([]TagImageHook, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook. +func (in *LifecycleHook) DeepCopy() *LifecycleHook { + if in == nil { + return nil + } + out := new(LifecycleHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleHookFailurePolicy) DeepCopyInto(out *LifecycleHookFailurePolicy) { + { + in := (*string)(unsafe.Pointer(in)) + out := (*string)(unsafe.Pointer(out)) + *out = *in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHookFailurePolicy. +func (in *LifecycleHookFailurePolicy) DeepCopy() *LifecycleHookFailurePolicy { + if in == nil { + return nil + } + out := new(LifecycleHookFailurePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecreateDeploymentStrategyParams) DeepCopyInto(out *RecreateDeploymentStrategyParams) { + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.Pre != nil { + in, out := &in.Pre, &out.Pre + if *in == nil { + *out = nil + } else { + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + } + if in.Mid != nil { + in, out := &in.Mid, &out.Mid + if *in == nil { + *out = nil + } else { + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + } + if in.Post != nil { + in, out := &in.Post, &out.Post + if *in == nil { + *out = nil + } else { + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecreateDeploymentStrategyParams. +func (in *RecreateDeploymentStrategyParams) DeepCopy() *RecreateDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(RecreateDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingDeploymentStrategyParams) DeepCopyInto(out *RollingDeploymentStrategyParams) { + *out = *in + if in.UpdatePeriodSeconds != nil { + in, out := &in.UpdatePeriodSeconds, &out.UpdatePeriodSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + if in.Pre != nil { + in, out := &in.Pre, &out.Pre + if *in == nil { + *out = nil + } else { + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + } + if in.Post != nil { + in, out := &in.Post, &out.Post + if *in == nil { + *out = nil + } else { + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingDeploymentStrategyParams. +func (in *RollingDeploymentStrategyParams) DeepCopy() *RollingDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(RollingDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagImageHook) DeepCopyInto(out *TagImageHook) { + *out = *in + out.To = in.To + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImageHook. +func (in *TagImageHook) DeepCopy() *TagImageHook { + if in == nil { + return nil + } + out := new(TagImageHook) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/project/v1/doc.go b/vendor/github.com/openshift/api/project/v1/doc.go new file mode 100644 index 000000000..5bbd9d5ea --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/project/apis/project +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=project.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/project/v1/generated.pb.go b/vendor/github.com/openshift/api/project/v1/generated.pb.go new file mode 100644 index 000000000..07836449a --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/generated.pb.go @@ -0,0 +1,1092 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/openshift/api/project/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + github.com/openshift/api/project/v1/generated.proto + + It has these top-level messages: + Project + ProjectList + ProjectRequest + ProjectSpec + ProjectStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Project) Reset() { *m = Project{} } +func (*Project) ProtoMessage() {} +func (*Project) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ProjectList) Reset() { *m = ProjectList{} } +func (*ProjectList) ProtoMessage() {} +func (*ProjectList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *ProjectRequest) Reset() { *m = ProjectRequest{} } +func (*ProjectRequest) ProtoMessage() {} +func (*ProjectRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *ProjectSpec) Reset() { *m = ProjectSpec{} } +func (*ProjectSpec) ProtoMessage() {} +func (*ProjectSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ProjectStatus) Reset() { *m = ProjectStatus{} } +func (*ProjectStatus) ProtoMessage() {} +func (*ProjectStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func init() { + proto.RegisterType((*Project)(nil), "github.com.openshift.api.project.v1.Project") + proto.RegisterType((*ProjectList)(nil), "github.com.openshift.api.project.v1.ProjectList") + proto.RegisterType((*ProjectRequest)(nil), "github.com.openshift.api.project.v1.ProjectRequest") + proto.RegisterType((*ProjectSpec)(nil), "github.com.openshift.api.project.v1.ProjectSpec") + proto.RegisterType((*ProjectStatus)(nil), "github.com.openshift.api.project.v1.ProjectStatus") +} +func (m *Project) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Project) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ProjectList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ProjectRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i += copy(dAtA[i:], m.DisplayName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + return i, nil +} + +func (m *ProjectSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ProjectStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Project) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ProjectList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ProjectRequest) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) 
+ l = len(m.DisplayName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ProjectSpec) Size() (n int) { + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ProjectStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Project) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Project{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ProjectSpec", "ProjectSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ProjectStatus", "ProjectStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Project", "Project", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Project) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Project: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Project{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DisplayName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, k8s_io_api_core_v1.FinalizerName(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = k8s_io_api_core_v1.NamespacePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/openshift/api/project/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 558 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x93, 0x3f, 0x6f, 0xd3, 0x4e, + 0x18, 0xc7, 0xe3, 0xa4, 0xe9, 0xaf, 0xb9, 0xfc, 0x5a, 0x21, 0xb3, 0x44, 0x19, 0x9c, 0x60, 0x96, + 0x0c, 0x70, 0x26, 0x29, 0x20, 0x66, 0x0b, 0x21, 0x90, 0xf8, 0x53, 0xcc, 0x44, 0xc5, 0xc0, 0xc5, + 0x79, 0xe2, 0x1c, 0x89, 0xed, 0xc3, 0x77, 0x8e, 0x54, 0x26, 0x5e, 0x02, 0x3b, 0xef, 0x85, 0x39, + 0x63, 0xc7, 0x4e, 0x51, 0x63, 0xde, 0x45, 0x27, 0x74, 0xe7, 0x6b, 0x6c, 0x48, 0x2a, 0xda, 0x85, + 0x2d, 0xcf, 0xe3, 0xef, 0xe7, 0x73, 0xe7, 0xe7, 0x71, 0xd0, 0x61, 0x40, 0xc5, 0x24, 0x1d, 0x62, + 0x3f, 0x0e, 0x9d, 0x98, 0x41, 0xc4, 0x27, 0x74, 0x2c, 0x1c, 0xc2, 0xa8, 0xc3, 0x92, 0xf8, 0x13, + 0xf8, 0xc2, 0x99, 0xf7, 0x9d, 0x00, 0x22, 0x48, 0x88, 0x80, 0x11, 0x66, 0x49, 0x2c, 0x62, 0xf3, + 0x6e, 0x01, 0xe1, 0x35, 0x84, 0x09, 0xa3, 0x58, 0x43, 0x78, 0xde, 0x6f, 0xdf, 0x2f, 0x99, 0x83, + 0x38, 0x88, 0x1d, 0xc5, 0x0e, 0xd3, 0xb1, 0xaa, 0x54, 0xa1, 0x7e, 0xe5, 0xce, 0xb6, 0x3d, 0x7d, + 0xc2, 0x31, 0x8d, 0xd5, 0xd1, 0x7e, 0x9c, 0xc0, 0x96, 0x73, 0xdb, 0x0f, 0x8b, 0x4c, 0x48, 0xfc, + 0x09, 0x8d, 0x20, 0x39, 0x71, 0xd8, 0x34, 0x90, 0x0d, 0xee, 0x84, 0x20, 0xc8, 0x36, 0xca, 0xb9, + 0x8a, 0x4a, 0xd2, 0x48, 0xd0, 0x10, 0x36, 0x80, 0xc7, 0x7f, 0x03, 0xb8, 0x3f, 0x81, 0x90, 0x6c, + 0x70, 0x87, 0x57, 0x71, 0xa9, 0xa0, 0x33, 0x87, 0x46, 0x82, 0x8b, 0xe4, 0x4f, 0xc8, 0xfe, 0x5e, + 0x45, 0xff, 0x1d, 
0xe5, 0x53, 0x33, 0x3f, 0xa2, 0x3d, 0xf9, 0x12, 0x23, 0x22, 0x48, 0xcb, 0xe8, + 0x1a, 0xbd, 0xe6, 0xe0, 0x01, 0xce, 0x9d, 0xb8, 0xec, 0xc4, 0x6c, 0x1a, 0xc8, 0x06, 0xc7, 0x32, + 0x8d, 0xe7, 0x7d, 0xfc, 0x66, 0x28, 0xf9, 0x57, 0x20, 0x88, 0x6b, 0x2e, 0x96, 0x9d, 0x4a, 0xb6, + 0xec, 0xa0, 0xa2, 0xe7, 0xad, 0xad, 0xa6, 0x87, 0x76, 0x38, 0x03, 0xbf, 0x55, 0xd5, 0xf6, 0x6b, + 0x2c, 0x12, 0xeb, 0xdb, 0xbd, 0x63, 0xe0, 0xbb, 0xff, 0x6b, 0xfb, 0x8e, 0xac, 0x3c, 0xe5, 0x32, + 0x8f, 0xd1, 0x2e, 0x17, 0x44, 0xa4, 0xbc, 0x55, 0x53, 0xd6, 0xc1, 0x8d, 0xac, 0x8a, 0x74, 0x0f, + 0xb4, 0x77, 0x37, 0xaf, 0x3d, 0x6d, 0xb4, 0x7f, 0x18, 0xa8, 0xa9, 0x93, 0x2f, 0x29, 0x17, 0xe6, + 0x87, 0x8d, 0x09, 0xe1, 0xeb, 0x4d, 0x48, 0xd2, 0x6a, 0x3e, 0xb7, 0xf4, 0x49, 0x7b, 0x97, 0x9d, + 0xd2, 0x74, 0xde, 0xa2, 0x3a, 0x15, 0x10, 0xf2, 0x56, 0xb5, 0x5b, 0xeb, 0x35, 0x07, 0xf7, 0x6e, + 0xf2, 0x22, 0xee, 0xbe, 0x16, 0xd7, 0x5f, 0x48, 0x85, 0x97, 0x9b, 0xec, 0x73, 0x03, 0x1d, 0xe8, + 0x84, 0x07, 0x9f, 0x53, 0xe0, 0xff, 0x62, 0xcb, 0x8f, 0x50, 0x73, 0x44, 0x39, 0x9b, 0x91, 0x93, + 0xd7, 0x24, 0x04, 0xb5, 0xec, 0x86, 0x7b, 0x5b, 0x23, 0xcd, 0xa7, 0xc5, 0x23, 0xaf, 0x9c, 0x53, + 0x18, 0x70, 0x3f, 0xa1, 0x4c, 0xd0, 0x38, 0x52, 0xdb, 0x2c, 0x63, 0xc5, 0x23, 0xaf, 0x9c, 0xb3, + 0xc9, 0x7a, 0x45, 0xf2, 0xa3, 0x30, 0x3d, 0x84, 0xc6, 0x34, 0x22, 0x33, 0xfa, 0x05, 0x12, 0xde, + 0x32, 0xba, 0xb5, 0x5e, 0xc3, 0x1d, 0xc8, 0xab, 0x3e, 0x5b, 0x77, 0x2f, 0x96, 0x9d, 0xee, 0xe6, + 0xdf, 0x1d, 0xaf, 0x03, 0xea, 0x6a, 0x25, 0x8b, 0xfd, 0x1e, 0xed, 0xff, 0xf6, 0xbd, 0x98, 0xcf, + 0x51, 0x9d, 0x4d, 0x08, 0x07, 0x35, 0xc0, 0x86, 0x3b, 0xb8, 0x9c, 0xfd, 0x91, 0x6c, 0x5e, 0x2c, + 0x3b, 0x77, 0xb6, 0xe8, 0xa5, 0x95, 0x33, 0xe2, 0x83, 0x0a, 0x79, 0xb9, 0xc0, 0xed, 0x2d, 0x56, + 0x56, 0xe5, 0x74, 0x65, 0x55, 0xce, 0x56, 0x56, 0xe5, 0x6b, 0x66, 0x19, 0x8b, 0xcc, 0x32, 0x4e, + 0x33, 0xcb, 0x38, 0xcb, 0x2c, 0xe3, 0x3c, 0xb3, 0x8c, 0x6f, 0x3f, 0xad, 0xca, 0x71, 0x75, 0xde, + 0xff, 0x15, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x82, 0x78, 0x13, 0x2b, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/openshift/api/project/v1/register.go b/vendor/github.com/openshift/api/project/v1/register.go new file mode 100644 index 000000000..07f866897 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/register.go @@ -0,0 +1,49 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + GroupName = "project.openshift.io" + LegacyGroupName = "" +) + +// SchemeGroupVersion is group version used to register these objects +var ( + SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "v1"} + + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme + + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Project{}, + &ProjectList{}, + &ProjectRequest{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Project{}, + &ProjectList{}, + &ProjectRequest{}, + } + scheme.AddKnownTypes(LegacySchemeGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/project/v1/types.go b/vendor/github.com/openshift/api/project/v1/types.go new file mode 100644 index 000000000..c4ed0555f --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/types.go @@ -0,0 +1,79 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ProjectList is a list of Project objects. +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of projects + Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +const ( + // These are internal finalizer values to Origin + FinalizerOrigin corev1.FinalizerName = "openshift.io/origin" +) + +// ProjectSpec describes the attributes on a Project +type ProjectSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []corev1.FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=k8s.io/api/core/v1.FinalizerName"` +} + +// ProjectStatus is information about the current status of a Project +type ProjectStatus struct { + // Phase is the current lifecycle phase of the project + Phase corev1.NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=k8s.io/api/core/v1.NamespacePhase"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, +// a quota on the resources that the project may consume, and the security controls on the resources in +// the project. Within a project, members may have different roles - project administrators can set +// membership, editors can create and manage the resources, and viewers can see but not access running +// containers. In a normal cluster project administrators are not able to alter their quotas - that is +// restricted to cluster administrators. +// +// Listing or watching projects will return only projects the user has the reader role on. +// +// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed +// as editable to end users while namespaces are not. Direct creation of a project is typically restricted +// to administrators, while end users should use the requestproject resource. +type Project struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of the Namespace. 
+ Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status describes the current status of a Namespace + Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +genclient:nonNamespaced +// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=Project + +// ProjecRequest is the set of options necessary to fully qualify a project request +type ProjectRequest struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // DisplayName is the display name to apply to a project + DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` + // Description is the description to apply to a project + Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` +} diff --git a/vendor/github.com/openshift/api/project/v1/types_swagger_doc_generated.go b/vendor/github.com/openshift/api/project/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..16c9a3e9f --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/types_swagger_doc_generated.go @@ -0,0 +1,64 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Project = map[string]string{ + "": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. 
Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.", + "metadata": "Standard object's metadata.", + "spec": "Spec defines the behavior of the Namespace.", + "status": "Status describes the current status of a Namespace", +} + +func (Project) SwaggerDoc() map[string]string { + return map_Project +} + +var map_ProjectList = map[string]string{ + "": "ProjectList is a list of Project objects.", + "metadata": "Standard object's metadata.", + "items": "Items is the list of projects", +} + +func (ProjectList) SwaggerDoc() map[string]string { + return map_ProjectList +} + +var map_ProjectRequest = map[string]string{ + "": "ProjecRequest is the set of options necessary to fully qualify a project request", + "metadata": "Standard object's metadata.", + "displayName": "DisplayName is the display name to apply to a project", + "description": "Description is the description to apply to a project", +} + +func (ProjectRequest) SwaggerDoc() map[string]string { + return map_ProjectRequest +} + +var map_ProjectSpec = map[string]string{ + "": "ProjectSpec describes the attributes on a Project", + "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage", +} + +func (ProjectSpec) SwaggerDoc() map[string]string { + return map_ProjectSpec +} + +var map_ProjectStatus = map[string]string{ + "": "ProjectStatus is information about the current status of a Project", + "phase": "Phase is the current lifecycle phase of the project", +} + +func (ProjectStatus) SwaggerDoc() map[string]string { + return map_ProjectStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..9c2ef00ba --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go @@ -0,0 +1,137 @@ +// +build !ignore_autogenerated + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. 
+func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectRequest) DeepCopyInto(out *ProjectRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectRequest. +func (in *ProjectRequest) DeepCopy() *ProjectRequest { + if in == nil { + return nil + } + out := new(ProjectRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]core_v1.FinalizerName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go old mode 100755 new mode 100644 diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go old mode 100755 new mode 100644 index d8bd013e6..727d76167 --- a/vendor/github.com/pborman/uuid/doc.go +++ b/vendor/github.com/pborman/uuid/doc.go @@ -4,5 +4,10 @@ // The uuid package generates and inspects UUIDs. // -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// This package is a partial wrapper around the github.com/google/uuid package. +// This package represents a UUID as []byte while github.com/google/uuid +// represents a UUID as [16]byte. package uuid diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go index cdd4192fd..a0420c1ef 100644 --- a/vendor/github.com/pborman/uuid/hash.go +++ b/vendor/github.com/pborman/uuid/hash.go @@ -19,7 +19,7 @@ var ( NIL = Parse("00000000-0000-0000-0000-000000000000") ) -// NewHash returns a new UUID dervied from the hash of space concatenated with +// NewHash returns a new UUID derived from the hash of space concatenated with // data generated by h. 
The hash should be at least 16 byte in length. The // first 16 bytes of the hash are used to form the UUID. The version of the // UUID will be the lower 4 bits of version. NewHash is used to implement diff --git a/vendor/github.com/pborman/uuid/json.go b/vendor/github.com/pborman/uuid/json.go deleted file mode 100644 index 760580a50..000000000 --- a/vendor/github.com/pborman/uuid/json.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "errors" - -func (u UUID) MarshalJSON() ([]byte, error) { - if len(u) == 0 { - return []byte(`""`), nil - } - return []byte(`"` + u.String() + `"`), nil -} - -func (u *UUID) UnmarshalJSON(data []byte) error { - if len(data) == 0 || string(data) == `""` { - return nil - } - if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { - return errors.New("invalid UUID format") - } - data = data[1 : len(data)-1] - uu := Parse(string(data)) - if uu == nil { - return errors.New("invalid UUID format") - } - *u = uu - return nil -} diff --git a/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/pborman/uuid/marshal.go new file mode 100644 index 000000000..35b89352a --- /dev/null +++ b/vendor/github.com/pborman/uuid/marshal.go @@ -0,0 +1,85 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "errors" + "fmt" + + guuid "github.com/google/uuid" +) + +// MarshalText implements encoding.TextMarshaler. +func (u UUID) MarshalText() ([]byte, error) { + if len(u) != 16 { + return nil, nil + } + var js [36]byte + encodeHex(js[:], u) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (u *UUID) UnmarshalText(data []byte) error { + if len(data) == 0 { + return nil + } + id := Parse(string(data)) + if id == nil { + return errors.New("invalid UUID") + } + *u = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (u UUID) MarshalBinary() ([]byte, error) { + return u[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (u *UUID) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + var id [16]byte + copy(id[:], data) + *u = id[:] + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (u Array) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], u[:]) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (u *Array) UnmarshalText(data []byte) error { + id, err := guuid.ParseBytes(data) + if err != nil { + return err + } + *u = Array(id) + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (u Array) MarshalBinary() ([]byte, error) { + return u[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
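For reference, a minimal sketch (using the vendored github.com/pborman/uuid import path) of the text and binary round-trips that the new marshal.go wires up; the generated value is illustrative:

```go
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	id := uuid.NewRandom()

	// MarshalText produces the canonical 36-character form; UnmarshalText
	// accepts anything uuid.Parse understands.
	txt, err := id.MarshalText()
	if err != nil {
		panic(err)
	}
	var fromText uuid.UUID
	if err := fromText.UnmarshalText(txt); err != nil {
		panic(err)
	}

	// MarshalBinary is the raw 16-byte slice; UnmarshalBinary expects the
	// same 16 bytes back.
	raw, err := id.MarshalBinary()
	if err != nil {
		panic(err)
	}
	var fromRaw uuid.UUID
	if err := fromRaw.UnmarshalBinary(raw); err != nil {
		panic(err)
	}

	fmt.Println(uuid.Equal(id, fromText), uuid.Equal(id, fromRaw)) // true true
}
```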
+func (u *Array) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(u[:], data) + return nil +} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go old mode 100755 new mode 100644 index dd0a8ac18..e524e0101 --- a/vendor/github.com/pborman/uuid/node.go +++ b/vendor/github.com/pborman/uuid/node.go @@ -4,19 +4,15 @@ package uuid -import "net" - -var ( - interfaces []net.Interface // cached list of interfaces - ifname string // name of interface being used - nodeID []byte // hardware for version 1 UUIDs +import ( + guuid "github.com/google/uuid" ) // NodeInterface returns the name of the interface from which the NodeID was // derived. The interface "user" is returned if the NodeID was set by // SetNodeID. func NodeInterface() string { - return ifname + return guuid.NodeInterface() } // SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. @@ -26,67 +22,20 @@ func NodeInterface() string { // // SetNodeInterface never fails when name is "". func SetNodeInterface(name string) bool { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil && name != "" { - return false - } - } - - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - if setNodeID(ifs.HardwareAddr) { - ifname = ifs.Name - return true - } - } - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - if nodeID == nil { - nodeID = make([]byte, 6) - } - randomBits(nodeID) - return true - } - return false + return guuid.SetNodeInterface(name) } // NodeID returns a slice of a copy of the current Node ID, setting the Node ID // if not already set. func NodeID() []byte { - if nodeID == nil { - SetNodeInterface("") - } - nid := make([]byte, 6) - copy(nid, nodeID) - return nid + return guuid.NodeID() } // SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes // of id are used. If id is less than 6 bytes then false is returned and the // Node ID is not set. func SetNodeID(id []byte) bool { - if setNodeID(id) { - ifname = "user" - return true - } - return false -} - -func setNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - if nodeID == nil { - nodeID = make([]byte, 6) - } - copy(nodeID, id) - return true + return guuid.SetNodeID(id) } // NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go new file mode 100644 index 000000000..929c3847e --- /dev/null +++ b/vendor/github.com/pborman/uuid/sql.go @@ -0,0 +1,68 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "errors" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. 
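Similarly, a small sketch of exercising the Scan/Value pair implemented below; the value handed to Scan stands in for whatever a database driver would return:

```go
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	id := uuid.NewRandom()

	// Value renders the UUID as its canonical string, which is what ends up
	// in the database column.
	v, err := id.Value()
	if err != nil {
		panic(err)
	}

	// Scan accepts that string (or a raw 16-byte slice) and rebuilds the
	// UUID in place.
	var out uuid.UUID
	if err := out.Scan(v); err != nil {
		panic(err)
	}
	fmt.Println(uuid.Equal(id, out)) // true
}
```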
+func (uuid *UUID) Scan(src interface{}) error { + switch src.(type) { + case string: + // if an empty UUID comes from a table, we return a null UUID + if src.(string) == "" { + return nil + } + + // see uuid.Parse for required string format + parsed := Parse(src.(string)) + + if parsed == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = parsed + case []byte: + b := src.([]byte) + + // if an empty UUID comes from a table, we return a null UUID + if len(b) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(b) == 16 { + parsed := make([]byte, 16) + copy(parsed, b) + *uuid = UUID(parsed) + } else { + u := Parse(string(b)) + + if u == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = u + } + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go old mode 100755 new mode 100644 index 7ebc9bef1..5c0960d87 --- a/vendor/github.com/pborman/uuid/time.go +++ b/vendor/github.com/pborman/uuid/time.go @@ -6,65 +6,18 @@ package uuid import ( "encoding/binary" - "sync" - "time" + + guuid "github.com/google/uuid" ) // A Time represents a time as the number of 100's of nanoseconds since 15 Oct // 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - mu sync.Mutex - lasttime uint64 // last time we returned - clock_seq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} +type Time = guuid.Time // GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and // clock sequence as well as adjusting the clock sequence as needed. An error // is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer mu.Unlock() - mu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clock_seq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clock_seq, nil -} +func GetTime() (Time, uint16, error) { return guuid.GetTime() } // ClockSequence returns the current clock sequence, generating one if not // already set. The clock sequence is only used for Version 1 UUIDs. @@ -74,39 +27,11 @@ func getTime() (Time, uint16, error) { // clock sequence is generated the first time a clock sequence is requested by // ClockSequence, GetTime, or NewUUID. 
(section 4.2.1.1) sequence is generated // for -func ClockSequence() int { - defer mu.Unlock() - mu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clock_seq == 0 { - setClockSequence(-1) - } - return int(clock_seq & 0x3fff) -} +func ClockSequence() int { return guuid.ClockSequence() } // SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to // -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer mu.Unlock() - mu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - old_seq := clock_seq - clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if old_seq != clock_seq { - lasttime = 0 - } -} +func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in // uuid. It returns false if uuid is not valid. The time is only well defined diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go index de40b102c..255b5e248 100644 --- a/vendor/github.com/pborman/uuid/util.go +++ b/vendor/github.com/pborman/uuid/util.go @@ -4,19 +4,8 @@ package uuid -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - // xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = []byte{ +var xvalues = [256]byte{ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go old mode 100755 new mode 100644 index 2920fae63..a2429b0dd --- a/vendor/github.com/pborman/uuid/uuid.go +++ b/vendor/github.com/pborman/uuid/uuid.go @@ -7,28 +7,43 @@ package uuid import ( "bytes" "crypto/rand" - "fmt" + "encoding/hex" "io" - "strings" + + guuid "github.com/google/uuid" ) +// Array is a pass-by-value UUID that can be used as an effecient key in a map. +type Array [16]byte + +// UUID converts uuid into a slice. +func (uuid Array) UUID() UUID { + return uuid[:] +} + +// String returns the string representation of uuid, +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (uuid Array) String() string { + return guuid.UUID(uuid).String() +} + // A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC // 4122. type UUID []byte // A Version represents a UUIDs version. -type Version byte +type Version = guuid.Version // A Variant represents a UUIDs variant. -type Variant byte +type Variant = guuid.Variant // Constants returned by Variant. const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. + Invalid = guuid.Invalid // Invalid UUID + RFC4122 = guuid.RFC4122 // The variant specified in RFC4122 + Reserved = guuid.Reserved // Reserved, NCS backward compatibility. + Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future = guuid.Future // Reserved for future definition. 
) var rander = rand.Reader // random function @@ -43,31 +58,20 @@ func New() string { // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. func Parse(s string) UUID { - if len(s) == 36+9 { - if strings.ToLower(s[:9]) != "urn:uuid:" { - return nil - } - s = s[9:] - } else if len(s) != 36 { - return nil + gu, err := guuid.Parse(s) + if err == nil { + return gu[:] } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return nil + return nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + gu, err := guuid.ParseBytes(b) + if err == nil { + return gu[:], nil } - uuid := make([]byte, 16) - for i, x := range []int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - if v, ok := xtob(s[x:]); !ok { - return nil - } else { - uuid[i] = v - } - } - return uuid + return nil, err } // Equal returns true if uuid1 and uuid2 are equal. @@ -75,26 +79,50 @@ func Equal(uuid1, uuid2 UUID) bool { return bytes.Equal(uuid1, uuid2) } +// Array returns an array representation of uuid that can be used as a map key. +// Array panics if uuid is not valid. +func (uuid UUID) Array() Array { + if len(uuid) != 16 { + panic("invalid uuid") + } + var a Array + copy(a[:], uuid) + return a +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { - if uuid == nil || len(uuid) != 16 { + if len(uuid) != 16 { return "" } - b := []byte(uuid) - return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", - b[:4], b[4:6], b[6:8], b[8:10], b[10:]) + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) } // URN returns the RFC 2141 URN form of uuid, // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. func (uuid UUID) URN() string { - if uuid == nil || len(uuid) != 16 { + if len(uuid) != 16 { return "" } - b := []byte(uuid) - return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x", - b[:4], b[4:6], b[6:8], b[8:10], b[10:]) + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst[:], uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) } // Variant returns the variant encoded in uuid. It returns Invalid if @@ -113,10 +141,9 @@ func (uuid UUID) Variant() Variant { default: return Reserved } - panic("unreachable") } -// Version returns the verison of uuid. It returns false if uuid is not +// Version returns the version of uuid. It returns false if uuid is not // valid. func (uuid UUID) Version() (Version, bool) { if len(uuid) != 16 { @@ -125,39 +152,12 @@ func (uuid UUID) Version() (Version, bool) { return Version(uuid[6] >> 4), true } -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implents io.Reader. 
+// SetRand sets the random number generator to r, which implements io.Reader. // If r.Read returns an error when the package requests random data then // a panic will be issued. // // Calling SetRand with nil sets the random number generator to the default // generator. func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r + guuid.SetRand(r) } diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go index 0127eacfa..7af948da7 100644 --- a/vendor/github.com/pborman/uuid/version1.go +++ b/vendor/github.com/pborman/uuid/version1.go @@ -5,7 +5,7 @@ package uuid import ( - "encoding/binary" + guuid "github.com/google/uuid" ) // NewUUID returns a Version 1 UUID based on the current NodeID and clock @@ -15,27 +15,9 @@ import ( // SetClockSequence then it will be set automatically. If GetTime fails to // return the current NewUUID returns nil. func NewUUID() UUID { - if nodeID == nil { - SetNodeInterface("") + gu, err := guuid.NewUUID() + if err == nil { + return UUID(gu[:]) } - - now, seq, err := GetTime() - if err != nil { - return nil - } - - uuid := make([]byte, 16) - - time_low := uint32(now & 0xffffffff) - time_mid := uint16((now >> 32) & 0xffff) - time_hi := uint16((now >> 48) & 0x0fff) - time_hi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], time_low) - binary.BigEndian.PutUint16(uuid[4:], time_mid) - binary.BigEndian.PutUint16(uuid[6:], time_hi) - binary.BigEndian.PutUint16(uuid[8:], seq) - copy(uuid[10:], nodeID) - - return uuid + return nil } diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go index b3d4a368d..b459d46d1 100644 --- a/vendor/github.com/pborman/uuid/version4.go +++ b/vendor/github.com/pborman/uuid/version4.go @@ -4,12 +4,14 @@ package uuid +import guuid "github.com/google/uuid" + // Random returns a Random (Version 4) UUID or panics. // // The strength of the UUIDs is based on the strength of the crypto/rand // package. // -// A note about uniqueness derived from from the UUID Wikipedia entry: +// A note about uniqueness derived from the UUID Wikipedia entry: // // Randomly generated UUIDs have 122 random bits. One's annual risk of being // hit by a meteorite is estimated to be one chance in 17 billion, that @@ -17,9 +19,8 @@ package uuid // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() UUID { - uuid := make([]byte, 16) - randomBits([]byte(uuid)) - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid + if gu, err := guuid.NewRandom(); err == nil { + return UUID(gu[:]) + } + return nil } diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md deleted file mode 100644 index 3474739ed..000000000 --- a/vendor/github.com/peterbourgon/diskv/README.md +++ /dev/null @@ -1,141 +0,0 @@ -# What is diskv? - -Diskv (disk-vee) is a simple, persistent key-value store written in the Go -language. It starts with an incredibly simple API for storing arbitrary data on -a filesystem by key, and builds several layers of performance-enhancing -abstraction on top. The end result is a conceptually simple, but highly -performant, disk-backed storage system. 
- -[![Build Status][1]][2] - -[1]: https://drone.io/github.com/peterbourgon/diskv/status.png -[2]: https://drone.io/github.com/peterbourgon/diskv/latest - - -# Installing - -Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5]. -Then, - -```bash -$ go get github.com/peterbourgon/diskv -``` - -[3]: http://golang.org -[4]: http://golang.org/doc/install/source -[5]: http://golang.org/doc/install - - -# Usage - -```go -package main - -import ( - "fmt" - "github.com/peterbourgon/diskv" -) - -func main() { - // Simplest transform function: put all the data files into the base dir. - flatTransform := func(s string) []string { return []string{} } - - // Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache. - d := diskv.New(diskv.Options{ - BasePath: "my-data-dir", - Transform: flatTransform, - CacheSizeMax: 1024 * 1024, - }) - - // Write three bytes to the key "alpha". - key := "alpha" - d.Write(key, []byte{'1', '2', '3'}) - - // Read the value back out of the store. - value, _ := d.Read(key) - fmt.Printf("%v\n", value) - - // Erase the key+value from the store (and the disk). - d.Erase(key) -} -``` - -More complex examples can be found in the "examples" subdirectory. - - -# Theory - -## Basic idea - -At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`). -The data is written to a single file on disk, with the same name as the key. -The key determines where that file will be stored, via a user-provided -`TransformFunc`, which takes a key and returns a slice (`[]string`) -corresponding to a path list where the key file will be stored. The simplest -TransformFunc, - -```go -func SimpleTransform (key string) []string { - return []string{} -} -``` - -will place all keys in the same, base directory. The design is inspired by -[Redis diskstore][6]; a TransformFunc which emulates the default diskstore -behavior is available in the content-addressable-storage example. - -[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1 - -**Note** that your TransformFunc should ensure that one valid key doesn't -transform to a subset of another valid key. That is, it shouldn't be possible -to construct valid keys that resolve to directory names. As a concrete example, -if your TransformFunc splits on every 3 characters, then - -```go -d.Write("abcabc", val) // OK: written to /abc/abc/abcabc -d.Write("abc", val) // Error: attempted write to /abc/abc, but it's a directory -``` - -This will be addressed in an upcoming version of diskv. - -Probably the most important design principle behind diskv is that your data is -always flatly available on the disk. diskv will never do anything that would -prevent you from accessing, copying, backing up, or otherwise interacting with -your data via common UNIX commandline tools. - -## Adding a cache - -An in-memory caching layer is provided by combining the BasicStore -functionality with a simple map structure, and keeping it up-to-date as -appropriate. Since the map structure in Go is not threadsafe, it's combined -with a RWMutex to provide safe concurrent access. - -## Adding order - -diskv is a key-value store and therefore inherently unordered. An ordering -system can be injected into the store by passing something which satisfies the -diskv.Index interface. (A default implementation, using Google's -[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a -user-provided Less function) index of the keys, which can be queried. 
- -[7]: https://github.com/google/btree - -## Adding compression - -Something which implements the diskv.Compression interface may be passed -during store creation, so that all Writes and Reads are filtered through -a compression/decompression pipeline. Several default implementations, -using stdlib compression algorithms, are provided. Note that data is cached -compressed; the cost of decompression is borne with each Read. - -## Streaming - -diskv also now provides ReadStream and WriteStream methods, to allow very large -data to be handled efficiently. - - -# Future plans - - * Needs plenty of robust testing: huge datasets, etc... - * More thorough benchmarking - * Your suggestions for use-cases I haven't thought of diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b1b..000000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 588ceca18..000000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.4.3 - - 1.5.4 - - 1.6.2 - - 1.7.1 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 273db3c98..000000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. 
-```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Contributing - -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. - -Before proposing a change, please discuss your change by raising an issue. - -## Licence - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade0..000000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go index 842ee8045..7421f326f 100644 --- a/vendor/github.com/pkg/errors/errors.go +++ b/vendor/github.com/pkg/errors/errors.go @@ -6,7 +6,7 @@ // return err // } // -// which applied recursively up the call stack results in error reports +// which when applied recursively up the call stack results in error reports // without context or debugging information. The errors package allows // programmers to add context to the failure path in their code in a way // that does not destroy the original value of the error. @@ -15,16 +15,17 @@ // // The errors.Wrap function returns a new error that adds context to the // original error by recording a stack trace at the point Wrap is called, -// and the supplied message. For example +// together with the supplied message. For example // // _, err := ioutil.ReadAll(r) // if err != nil { // return errors.Wrap(err, "read failed") // } // -// If additional control is required the errors.WithStack and errors.WithMessage -// functions destructure errors.Wrap into its component operations of annotating -// an error with a stack trace and an a message, respectively. +// If additional control is required, the errors.WithStack and +// errors.WithMessage functions destructure errors.Wrap into its component +// operations: annotating an error with a stack trace and with a message, +// respectively. // // Retrieving the cause of an error // @@ -38,7 +39,7 @@ // } // // can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error which does not implement causer, which is assumed to be +// the topmost error that does not implement causer, which is assumed to be // the original cause. 
For example: // // switch err := errors.Cause(err).(type) { @@ -48,16 +49,16 @@ // // unknown error // } // -// causer interface is not exported by this package, but is considered a part -// of stable public API. +// Although the causer interface is not exported by this package, it is +// considered a part of its stable public interface. // // Formatted printing of errors // // All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported +// be formatted by the fmt package. The following verbs are supported: // // %s print the error. If the error has a Cause it will be -// printed recursively +// printed recursively. // %v see %s // %+v extended format. Each Frame of the error's StackTrace will // be printed in detail. @@ -65,13 +66,13 @@ // Retrieving the stack trace of an error or wrapper // // New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface. +// invoked. This information can be retrieved with the following interface: // // type stackTracer interface { // StackTrace() errors.StackTrace // } // -// Where errors.StackTrace is defined as +// The returned errors.StackTrace type is defined as // // type StackTrace []Frame // @@ -85,8 +86,8 @@ // } // } // -// stackTracer interface is not exported by this package, but is considered a part -// of stable public API. +// Although the stackTracer interface is not exported by this package, it is +// considered a part of its stable public interface. // // See the documentation for Frame.Format for more details. package errors @@ -192,7 +193,7 @@ func Wrap(err error, message string) error { } // Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is call, and the format specifier. +// at the point Wrapf is called, and the format specifier. // If err is nil, Wrapf returns nil. func Wrapf(err error, format string, args ...interface{}) error { if err == nil { @@ -220,6 +221,18 @@ func WithMessage(err error, message string) error { } } +// WithMessagef annotates err with the format specifier. +// If err is nil, WithMessagef returns nil. +func WithMessagef(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } +} + type withMessage struct { cause error msg string diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go index 6b1f2891a..2874a048c 100644 --- a/vendor/github.com/pkg/errors/stack.go +++ b/vendor/github.com/pkg/errors/stack.go @@ -46,7 +46,8 @@ func (f Frame) line() int { // // Format accepts flags that alter the printing of some verbs, as follows: // -// %+s path of source file relative to the compile time GOPATH +// %+s function name and path of source file relative to the compile time +// GOPATH separated by \n\t (\n\t) // %+v equivalent to %+s:%d func (f Frame) Format(s fmt.State, verb rune) { switch verb { @@ -79,6 +80,14 @@ func (f Frame) Format(s fmt.State, verb rune) { // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame +// Format formats the stack of Frames according to the fmt.Formatter interface. 
+// +// %s lists source files for each Frame in the stack +// %v lists the source file and line number for each Frame in the stack +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+v Prints filename, function, and line number for each Frame in the stack. func (st StackTrace) Format(s fmt.State, verb rune) { switch verb { case 'v': @@ -136,43 +145,3 @@ func funcname(name string) string { i = strings.Index(name, ".") return name[i+1:] } - -func trimGOPATH(name, file string) string { - // Here we want to get the source file path relative to the compile time - // GOPATH. As of Go 1.6.x there is no direct way to know the compiled - // GOPATH at runtime, but we can infer the number of path segments in the - // GOPATH. We note that fn.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired output. We count separators from the end of the file - // path until it finds two more than in the function name and then move - // one character forward to preserve the initial path segment without a - // leading separator. - const sep = "/" - goal := strings.Count(name, sep) + 2 - i := len(file) - for n := 0; n < goal; n++ { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - // not enough separators found, set i so that the slice expression - // below leaves file unmodified - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - file = file[i+len(sep):] - return file -} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 000000000..c67dad612 --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go new file mode 100644 index 000000000..003e99fad --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -0,0 +1,772 @@ +// Package difflib is a partial port of Python difflib module. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// The following class and functions have been ported: +// +// - SequenceMatcher +// +// - unified_diff +// +// - context_diff +// +// Getting unified diffs was the main goal of the port. Keep in mind this code +// is mostly suitable to output text differences in a human friendly way, there +// are no guarantees generated diffs are consumable by patch(1). +package difflib + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
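To make the opcode tag semantics above concrete, a minimal sketch that diffs two short line sequences and prints each opcode; the input strings are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	m := difflib.NewMatcher(
		difflib.SplitLines("one\ntwo\nthree\n"),
		difflib.SplitLines("one\nthree\nfour\n"),
	)
	// Tags: 'e' equal, 'r' replace, 'd' delete, 'i' insert.
	for _, op := range m.GetOpCodes() {
		fmt.Printf("%c  a[%d:%d] -> b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
}
```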
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n)}) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
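A short sketch of producing a unified diff with the helpers in this file; the file names and line contents are placeholders:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	// SplitLines keeps the trailing "\n" on every line, which is the input
	// format UnifiedDiff expects.
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("alpha\nbeta\ngamma\n"),
		B:        difflib.SplitLines("alpha\ngamma\ndelta\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3,
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}
```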
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return string(w.Bytes()), err +} + +// Convert range to the "ed" format. +func formatRangeContext(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + if length <= 1 { + return fmt.Sprintf("%d", beginning) + } + return fmt.Sprintf("%d,%d", beginning, beginning+length-1) +} + +type ContextDiff UnifiedDiff + +// Compare two sequences of lines; generate the delta as a context diff. +// +// Context diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by diff.Context +// which defaults to three. +// +// By default, the diff control lines (those with *** or ---) are +// created with a trailing newline. +// +// For inputs that do not have trailing newlines, set the diff.Eol +// argument to "" so that the output will be uniformly newline free. +// +// The context diff format normally has a header for filenames and +// modification times. Any or all of these may be specified using +// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. +// The modification times are normally expressed in the ISO 8601 format. +// If not specified, the strings default to blanks. 
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + var diffErr error + wf := func(format string, args ...interface{}) { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + if diffErr == nil && err != nil { + diffErr = err + } + } + ws := func(s string) { + _, err := buf.WriteString(s) + if diffErr == nil && err != nil { + diffErr = err + } + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + prefix := map[byte]string{ + 'i': "+ ", + 'd': "- ", + 'r': "! ", + 'e': " ", + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } + } + + first, last := g[0], g[len(g)-1] + ws("***************" + diff.Eol) + + range1 := formatRangeContext(first.I1, last.I2) + wf("*** %s ****%s", range1, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'd' { + for _, cc := range g { + if cc.Tag == 'i' { + continue + } + for _, line := range diff.A[cc.I1:cc.I2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + + range2 := formatRangeContext(first.J1, last.J2) + wf("--- %s ----%s", range2, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'i' { + for _, cc := range g { + if cc.Tag == 'd' { + continue + } + for _, line := range diff.B[cc.J1:cc.J2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + } + return diffErr +} + +// Like WriteContextDiff but returns the diff a string. +func GetContextDiffString(diff ContextDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteContextDiff(w, diff) + return string(w.Bytes()), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/prometheus/client_golang/api/client.go b/vendor/github.com/prometheus/client_golang/api/client.go new file mode 100644 index 000000000..09af749ca --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/api/client.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +// Package api provides clients for the HTTP APIs. +package api + +import ( + "context" + "io/ioutil" + "net" + "net/http" + "net/url" + "path" + "strings" + "time" +) + +// DefaultRoundTripper is used if no RoundTripper is set in Config. 
+var DefaultRoundTripper http.RoundTripper = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + TLSHandshakeTimeout: 10 * time.Second, +} + +// Config defines configuration parameters for a new client. +type Config struct { + // The address of the Prometheus to connect to. + Address string + + // RoundTripper is used by the Client to drive HTTP requests. If not + // provided, DefaultRoundTripper will be used. + RoundTripper http.RoundTripper +} + +func (cfg *Config) roundTripper() http.RoundTripper { + if cfg.RoundTripper == nil { + return DefaultRoundTripper + } + return cfg.RoundTripper +} + +// Client is the interface for an API client. +type Client interface { + URL(ep string, args map[string]string) *url.URL + Do(context.Context, *http.Request) (*http.Response, []byte, error) +} + +// NewClient returns a new Client. +// +// It is safe to use the returned Client from multiple goroutines. +func NewClient(cfg Config) (Client, error) { + u, err := url.Parse(cfg.Address) + if err != nil { + return nil, err + } + u.Path = strings.TrimRight(u.Path, "/") + + return &httpClient{ + endpoint: u, + client: http.Client{Transport: cfg.roundTripper()}, + }, nil +} + +type httpClient struct { + endpoint *url.URL + client http.Client +} + +func (c *httpClient) URL(ep string, args map[string]string) *url.URL { + p := path.Join(c.endpoint.Path, ep) + + for arg, val := range args { + arg = ":" + arg + p = strings.Replace(p, arg, val, -1) + } + + u := *c.endpoint + u.Path = p + + return &u +} + +func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + if ctx != nil { + req = req.WithContext(ctx) + } + resp, err := c.client.Do(req) + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + close(done) + }() + + select { + case <-ctx.Done(): + err = resp.Body.Close() + <-done + if err == nil { + err = ctx.Err() + } + case <-done: + } + + return resp, body, err +} diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go new file mode 100644 index 000000000..642418890 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go @@ -0,0 +1,519 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
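For context, a minimal sketch of using the client defined above; the Prometheus address is a placeholder, and DefaultRoundTripper is picked up automatically because no RoundTripper is set in the Config.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/api"
)

func main() {
	// NewClient only parses and normalizes the address; no request is made yet.
	client, err := api.NewClient(api.Config{
		Address: "http://localhost:9090", // placeholder address
	})
	if err != nil {
		panic(err)
	}

	// URL joins the endpoint onto the base path and substitutes ":name"-style args.
	u := client.URL("/api/v1/label/:name/values", map[string]string{"name": "job"})
	fmt.Println(u.String()) // http://localhost:9090/api/v1/label/job/values
}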
+ +// +build go1.7 + +// Package v1 provides bindings to the Prometheus HTTP API v1: +// http://prometheus.io/docs/querying/api/ +package v1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/prometheus/client_golang/api" + "github.com/prometheus/common/model" +) + +const ( + statusAPIError = 422 + + apiPrefix = "/api/v1" + + epAlertManagers = apiPrefix + "/alertmanagers" + epQuery = apiPrefix + "/query" + epQueryRange = apiPrefix + "/query_range" + epLabelValues = apiPrefix + "/label/:name/values" + epSeries = apiPrefix + "/series" + epTargets = apiPrefix + "/targets" + epSnapshot = apiPrefix + "/admin/tsdb/snapshot" + epDeleteSeries = apiPrefix + "/admin/tsdb/delete_series" + epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones" + epConfig = apiPrefix + "/status/config" + epFlags = apiPrefix + "/status/flags" +) + +// ErrorType models the different API error types. +type ErrorType string + +// HealthStatus models the health status of a scrape target. +type HealthStatus string + +const ( + // Possible values for ErrorType. + ErrBadData ErrorType = "bad_data" + ErrTimeout ErrorType = "timeout" + ErrCanceled ErrorType = "canceled" + ErrExec ErrorType = "execution" + ErrBadResponse ErrorType = "bad_response" + ErrServer ErrorType = "server_error" + ErrClient ErrorType = "client_error" + + // Possible values for HealthStatus. + HealthGood HealthStatus = "up" + HealthUnknown HealthStatus = "unknown" + HealthBad HealthStatus = "down" +) + +// Error is an error returned by the API. +type Error struct { + Type ErrorType + Msg string + Detail string +} + +func (e *Error) Error() string { + return fmt.Sprintf("%s: %s", e.Type, e.Msg) +} + +// Range represents a sliced time range. +type Range struct { + // The boundaries of the time range. + Start, End time.Time + // The maximum time between two slices within the boundaries. + Step time.Duration +} + +// API provides bindings for Prometheus's v1 API. +type API interface { + // AlertManagers returns an overview of the current state of the Prometheus alert manager discovery. + AlertManagers(ctx context.Context) (AlertManagersResult, error) + // CleanTombstones removes the deleted data from disk and cleans up the existing tombstones. + CleanTombstones(ctx context.Context) error + // Config returns the current Prometheus configuration. + Config(ctx context.Context) (ConfigResult, error) + // DeleteSeries deletes data for a selection of series in a time range. + DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error + // Flags returns the flag values that Prometheus was launched with. + Flags(ctx context.Context) (FlagsResult, error) + // LabelValues performs a query for the values of the given label. + LabelValues(ctx context.Context, label string) (model.LabelValues, error) + // Query performs a query for the given time. + Query(ctx context.Context, query string, ts time.Time) (model.Value, error) + // QueryRange performs a query for the given range. + QueryRange(ctx context.Context, query string, r Range) (model.Value, error) + // Series finds series by label matchers. + Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error) + // Snapshot creates a snapshot of all current data into snapshots/- + // under the TSDB's data directory and returns the directory as response. 
+ Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) + // Targets returns an overview of the current state of the Prometheus target discovery. + Targets(ctx context.Context) (TargetsResult, error) +} + +// AlertManagersResult contains the result from querying the alertmanagers endpoint. +type AlertManagersResult struct { + Active []AlertManager `json:"activeAlertManagers"` + Dropped []AlertManager `json:"droppedAlertManagers"` +} + +// AlertManager models a configured Alert Manager. +type AlertManager struct { + URL string `json:"url"` +} + +// ConfigResult contains the result from querying the config endpoint. +type ConfigResult struct { + YAML string `json:"yaml"` +} + +// FlagsResult contains the result from querying the flag endpoint. +type FlagsResult map[string]string + +// SnapshotResult contains the result from querying the snapshot endpoint. +type SnapshotResult struct { + Name string `json:"name"` +} + +// TargetsResult contains the result from querying the targets endpoint. +type TargetsResult struct { + Active []ActiveTarget `json:"activeTargets"` + Dropped []DroppedTarget `json:"droppedTargets"` +} + +// ActiveTarget models an active Prometheus scrape target. +type ActiveTarget struct { + DiscoveredLabels model.LabelSet `json:"discoveredLabels"` + Labels model.LabelSet `json:"labels"` + ScrapeURL string `json:"scrapeUrl"` + LastError string `json:"lastError"` + LastScrape time.Time `json:"lastScrape"` + Health HealthStatus `json:"health"` +} + +// DroppedTarget models a dropped Prometheus scrape target. +type DroppedTarget struct { + DiscoveredLabels model.LabelSet `json:"discoveredLabels"` +} + +// queryResult contains result data for a query. +type queryResult struct { + Type model.ValueType `json:"resultType"` + Result interface{} `json:"result"` + + // The decoded value. + v model.Value +} + +func (qr *queryResult) UnmarshalJSON(b []byte) error { + v := struct { + Type model.ValueType `json:"resultType"` + Result json.RawMessage `json:"result"` + }{} + + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + + switch v.Type { + case model.ValScalar: + var sv model.Scalar + err = json.Unmarshal(v.Result, &sv) + qr.v = &sv + + case model.ValVector: + var vv model.Vector + err = json.Unmarshal(v.Result, &vv) + qr.v = vv + + case model.ValMatrix: + var mv model.Matrix + err = json.Unmarshal(v.Result, &mv) + qr.v = mv + + default: + err = fmt.Errorf("unexpected value type %q", v.Type) + } + return err +} + +// NewAPI returns a new API for the client. +// +// It is safe to use the returned API from multiple goroutines. 
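To show how these pieces fit together, here is an illustrative sketch of driving the v1 API; the address, the query expressions, and the five-second timeout are assumptions made for the example, not values taken from this change.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"}) // placeholder address
	if err != nil {
		panic(err)
	}
	promAPI := v1.NewAPI(client)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Instant query at "now"; the decoded result is a model.Value (scalar, vector or matrix).
	val, err := promAPI.Query(ctx, "up", time.Now())
	if err != nil {
		// API-level failures come back as *v1.Error with Type and Msg filled in.
		if apiErr, ok := err.(*v1.Error); ok {
			fmt.Println("API error:", apiErr.Type, apiErr.Msg)
			return
		}
		panic(err)
	}
	fmt.Println(val)

	// Range query over the last hour at one-minute resolution.
	matrix, err := promAPI.QueryRange(ctx, "rate(http_requests_total[5m])", v1.Range{
		Start: time.Now().Add(-time.Hour),
		End:   time.Now(),
		Step:  time.Minute,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(matrix)
}

Note that in this vendored version Query and QueryRange return just (model.Value, error); later releases of client_golang add a warnings return value.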
+func NewAPI(c api.Client) API { + return &httpAPI{client: apiClient{c}} +} + +type httpAPI struct { + client api.Client +} + +func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) { + u := h.client.URL(epAlertManagers, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return AlertManagersResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return AlertManagersResult{}, err + } + + var res AlertManagersResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) CleanTombstones(ctx context.Context) error { + u := h.client.URL(epCleanTombstones, nil) + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return err + } + + _, _, err = h.client.Do(ctx, req) + return err +} + +func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) { + u := h.client.URL(epConfig, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return ConfigResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return ConfigResult{}, err + } + + var res ConfigResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error { + u := h.client.URL(epDeleteSeries, nil) + q := u.Query() + + for _, m := range matches { + q.Add("match[]", m) + } + + q.Set("start", startTime.Format(time.RFC3339Nano)) + q.Set("end", endTime.Format(time.RFC3339Nano)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return err + } + + _, _, err = h.client.Do(ctx, req) + return err +} + +func (h *httpAPI) Flags(ctx context.Context) (FlagsResult, error) { + u := h.client.URL(epFlags, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return FlagsResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return FlagsResult{}, err + } + + var res FlagsResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, error) { + u := h.client.URL(epLabelValues, map[string]string{"name": label}) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + var labelValues model.LabelValues + err = json.Unmarshal(body, &labelValues) + return labelValues, err +} + +func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) { + u := h.client.URL(epQuery, nil) + q := u.Query() + + q.Set("query", query) + if !ts.IsZero() { + q.Set("time", ts.Format(time.RFC3339Nano)) + } + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var qres queryResult + err = json.Unmarshal(body, &qres) + + return model.Value(qres.v), err +} + +func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) { + u := h.client.URL(epQueryRange, nil) + q := u.Query() + + var ( + start = r.Start.Format(time.RFC3339Nano) + end = r.End.Format(time.RFC3339Nano) + step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64) + ) + + q.Set("query", query) + q.Set("start", start) + q.Set("end", end) + 
q.Set("step", step) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var qres queryResult + err = json.Unmarshal(body, &qres) + + return model.Value(qres.v), err +} + +func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error) { + u := h.client.URL(epSeries, nil) + q := u.Query() + + for _, m := range matches { + q.Add("match[]", m) + } + + q.Set("start", startTime.Format(time.RFC3339Nano)) + q.Set("end", endTime.Format(time.RFC3339Nano)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var mset []model.LabelSet + err = json.Unmarshal(body, &mset) + return mset, err +} + +func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) { + u := h.client.URL(epSnapshot, nil) + q := u.Query() + + q.Set("skip_head", strconv.FormatBool(skipHead)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return SnapshotResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return SnapshotResult{}, err + } + + var res SnapshotResult + err = json.Unmarshal(body, &res) + return res, err +} + +func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) { + u := h.client.URL(epTargets, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return TargetsResult{}, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return TargetsResult{}, err + } + + var res TargetsResult + err = json.Unmarshal(body, &res) + return res, err +} + +// apiClient wraps a regular client and processes successful API responses. +// Successful also includes responses that errored at the API level. +type apiClient struct { + api.Client +} + +type apiResponse struct { + Status string `json:"status"` + Data json.RawMessage `json:"data"` + ErrorType ErrorType `json:"errorType"` + Error string `json:"error"` +} + +func apiError(code int) bool { + // These are the codes that Prometheus sends when it returns an error. 
+ return code == statusAPIError || code == http.StatusBadRequest +} + +func errorTypeAndMsgFor(resp *http.Response) (ErrorType, string) { + switch resp.StatusCode / 100 { + case 4: + return ErrClient, fmt.Sprintf("client error: %d", resp.StatusCode) + case 5: + return ErrServer, fmt.Sprintf("server error: %d", resp.StatusCode) + } + return ErrBadResponse, fmt.Sprintf("bad response code %d", resp.StatusCode) +} + +func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + resp, body, err := c.Client.Do(ctx, req) + if err != nil { + return resp, body, err + } + + code := resp.StatusCode + + if code/100 != 2 && !apiError(code) { + errorType, errorMsg := errorTypeAndMsgFor(resp) + return resp, body, &Error{ + Type: errorType, + Msg: errorMsg, + Detail: string(body), + } + } + + var result apiResponse + + if http.StatusNoContent != code { + if err = json.Unmarshal(body, &result); err != nil { + return resp, body, &Error{ + Type: ErrBadResponse, + Msg: err.Error(), + } + } + } + + if apiError(code) != (result.Status == "error") { + err = &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + } + } + + if apiError(code) && result.Status == "error" { + err = &Error{ + Type: result.ErrorType, + Msg: result.Error, + } + } + + return resp, []byte(result.Data), err +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f0346..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index 44986bff0..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1 +0,0 @@ -See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d5..000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore deleted file mode 100644 index 25e3659ab..000000000 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md deleted file mode 100644 index 40503edbf..000000000 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ /dev/null @@ -1,18 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) the maintainer of this repository (see - [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md deleted file mode 100644 index f1d3b9937..000000000 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ /dev/null @@ -1,2 +0,0 @@ -* Tobias Schmidt @grobie -* Johannes 'fish' Ziemke @discordianfish diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile deleted file mode 100644 index 314d1ba56..000000000 --- a/vendor/github.com/prometheus/procfs/Makefile +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include Makefile.common - -%/.unpacked: %.ttar - ./ttar -C $(dir $*) -x -f $*.ttar - touch $@ - -update_fixtures: - rm -vf fixtures/.unpacked - ./ttar -c -f fixtures.ttar fixtures/ - -.PHONY: build -build: - -.PHONY: test -test: fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common deleted file mode 100644 index 741579e60..000000000 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# A common Makefile that includes rules to be reused in different prometheus projects. -# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! - -# Example usage : -# Create the main Makefile in the root project directory. -# include Makefile.common -# customTarget: -# @echo ">> Running customTarget" -# - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -GOOPTS ?= - -GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) -PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') - -unexport GOVENDOR -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif - - unexport GO111MODULE -endif -PROMU := $(FIRST_GOPATH)/bin/promu -STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck -pkgs = ./... 
- -GO_VERSION ?= $(shell $(GO) version) -GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION))) - -PROMU_VERSION ?= 0.2.0 -PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKER_REPO ?= prom - -.PHONY: all -all: precheck style staticcheck unused build test - -# This rule is used to forward a target like "build" to "common-build". This -# allows a new "build" target to be defined in a Makefile which includes this -# one and override "common-build" without override warnings. -%: common-% ; - -.PHONY: common-style -common-style: - @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ - if [ -n "$${fmtRes}" ]; then \ - echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ - echo "Please ensure you are using $$($(GO) version) for formatting code."; \ - exit 1; \ - fi - -.PHONY: common-check_license -common-check_license: - @echo ">> checking license header" - @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ - awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -.PHONY: common-test-short -common-test-short: - @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs) - -.PHONY: common-test -common-test: - @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs) - -.PHONY: common-format -common-format: - @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs) - -.PHONY: common-vet -common-vet: - @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) - -.PHONY: common-staticcheck -common-staticcheck: $(STATICCHECK) - @echo ">> running staticcheck" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs) -else - $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) -endif - -.PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE - @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy - @git diff --exit-code -- go.sum go.mod -ifneq (,$(wildcard vendor)) - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif - -.PHONY: common-build -common-build: promu - @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) - -.PHONY: common-tarball -common-tarball: promu - @echo ">> building release tarball" - $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -.PHONY: common-docker -common-docker: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . 
- -.PHONY: common-docker-publish -common-docker-publish: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" - -.PHONY: common-docker-tag-latest -common-docker-tag-latest: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest" - -.PHONY: promu -promu: $(PROMU) - -$(PROMU): - curl -s -L $(PROMU_URL) | tar -xvz -C /tmp - mkdir -v -p $(FIRST_GOPATH)/bin - cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU) - -.PHONY: proto -proto: - @echo ">> generating code from proto files" - @./scripts/genproto.sh - -.PHONY: $(STATICCHECK) -$(STATICCHECK): -ifdef GO111MODULE -# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}. -# See https://github.com/golang/go/issues/27643. -# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules. - tmpModule=$$(mktemp -d 2>&1) && \ - mkdir -p $${tmpModule}/staticcheck && \ - cd "$${tmpModule}"/staticcheck && \ - GO111MODULE=on $(GO) mod init example.com/staticcheck && \ - GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \ - rm -rf $${tmpModule}; -else - GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck -endif - -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif - -.PHONY: precheck -precheck:: - -define PRECHECK_COMMAND_template = -precheck:: $(1)_precheck - - -PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) -.PHONY: $(1)_precheck -$(1)_precheck: - @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ - echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ - exit 1; \ - fi -endef diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md deleted file mode 100644 index 209549471..000000000 --- a/vendor/github.com/prometheus/procfs/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# procfs - -This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. 
- -[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) -[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) -[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar deleted file mode 100644 index f7f84ef36..000000000 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ /dev/null @@ -1,1714 +0,0 @@ -# Archive created by ttar -c -f fixtures.ttar fixtures/ -Directory: fixtures -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cmdline -Lines: 1 -vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/comm -Lines: 1 -vim -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cwd -SymlinkTo: /usr/bin -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/exe -SymlinkTo: /usr/bin/vim -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/10 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/io -Lines: 7 -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: -1024 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 65536 65536 bytes -Max address space 8589934592 unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/26231/mountstats -Lines: 19 -device rootfs mounted on / with fstype rootfs -device sysfs mounted on /sys with fstype sysfs -device proc mounted on /proc with fstype proc -device /dev/sda1 mounted on / with fstype ext4 -device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none - age: 13968 - caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 - nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured - sec: flavor=1,pseudoflavor=1 - events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 - bytes: 1207640230 0 0 0 1210214218 0 295483 0 - RPC iostats version: 1.0 p/v: 100003/4 (nfs) - xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 - per-op statistics - NULL: 0 0 0 0 0 0 0 0 - READ: 1298 1298 0 207680 1210292152 6 79386 79407 - WRITE: 0 0 0 0 0 0 0 0 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/net/dev -Lines: 4 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/ns -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/mnt -SymlinkTo: mnt:[4026531840] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/net -SymlinkTo: net:[4026531993] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/root -SymlinkTo: / -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/stat -Lines: 1 -26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cmdline -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/comm -Lines: 1 -ata_sff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cwd -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/4 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/root -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/stat -Lines: 1 -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26233 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/cmdline -Lines: 1 -com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/584 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/584/stat -Lines: 2 -1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/diskstats -Lines: 49 - 1 0 ram0 0 0 0 0 0 
0 0 0 0 0 0 - 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 - 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 - 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 - 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 - 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 - 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 - 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 - 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 - 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 - 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 - 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 - 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 - 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 - 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 - 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 - 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 - 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 - 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 - 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 - 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 - 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 - 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 - 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 - 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 - 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 - 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 - 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 - 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 - 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 - 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 - 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 - 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 - 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 - 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 - 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 - 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 - 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 - 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 - 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 - 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 - 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 - 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 - 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 - 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 - 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 - 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 - 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 - 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/xfs/stat -Lines: 23 -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 
-xpc 399724544 92823103 86219234 -debug 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/mdstat -Lines: 26 -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0] sdb3[1] - 4883648 blocks [2/2] [UU] - -md6 : active raid1 sdb2[2] sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] - 195310144 blocks [2/2] [UU] - [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -unused devices: -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/dev -Lines: 6 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs -Lines: 21 -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs_stats -Lines: 6 - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/rpc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfs -Lines: 5 -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 
0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfsd -Lines: 11 -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/xfrm_stat -Lines: 28 -XfrmInError 1 -XfrmInBufferError 2 -XfrmInHdrError 4 -XfrmInNoStates 3 -XfrmInStateProtoError 40 -XfrmInStateModeError 100 -XfrmInStateSeqError 6000 -XfrmInStateExpired 4 -XfrmInStateMismatch 23451 -XfrmInStateInvalid 55555 -XfrmInTmplMismatch 51 -XfrmInNoPols 65432 -XfrmInPolBlock 100 -XfrmInPolError 10000 -XfrmOutError 1000000 -XfrmOutBundleGenError 43321 -XfrmOutBundleCheckError 555 -XfrmOutNoStates 869 -XfrmOutStateProtoError 4542 -XfrmOutStateModeError 4 -XfrmOutStateSeqError 543 -XfrmOutStateExpired 565 -XfrmOutPolBlock 43456 -XfrmOutPolDead 7656 -XfrmOutPolError 1454 -XfrmFwdHdrError 6654 -XfrmOutStateInvalid 28765 -XfrmAcquireError 24532 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/pressure -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/cpu -Lines: 1 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/io -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/memory -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/self -SymlinkTo: 26231 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/stat -Lines: 16 -cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 -cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 -cpu1 47869 23 16474 1110787 591 0 46 0 0 0 -cpu2 46504 36 15916 1112321 441 0 326 0 0 0 -cpu3 47054 102 15683 1113230 533 0 60 0 0 0 -cpu4 28413 25 10776 1140321 217 0 8 0 0 0 -cpu5 29271 101 11586 1136270 672 0 30 0 0 0 -cpu6 29152 36 10276 1139721 319 0 29 0 0 0 -cpu7 29098 268 10164 1139282 555 0 31 0 0 0 -intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ctxt 38014093 -btime 1418183276 -processes 26442 -procs_running 2 -procs_blocked 1 -softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/symlinktargets -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/README -Lines: 2 -This directory contains some empty files that are the symlinks the files in the "fd" directory point to. -They are otherwise ignored by the tests -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/abc -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/def -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/ghi -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/uvw -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/xyz -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/dm-0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/dm-0/stat -Lines: 1 -6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/stat -Lines: 1 -9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net/eth0 -Mode: 755 -# ttar - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_assign_type -Lines: 1 -3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_len -Lines: 1 -6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/address -Lines: 1 -01:01:01:01:01:01 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/broadcast -Lines: 1 -ff:ff:ff:ff:ff:ff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_changes -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_down_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_up_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dev_id -Lines: 1 -0x20 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dormant -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/duplex -Lines: 1 -full -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/flags -Lines: 1 -0x1303 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifalias -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifindex -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/iflink -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/link_mode -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/mtu -Lines: 1 -1500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/name_assign_type -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/netdev_group -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/operstate -Lines: 1 -up -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_name -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_switch_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/net/eth0/speed -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/tx_queue_len -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/type -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply/AC -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC/online -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC/type -Lines: 1 -Mains -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC/uevent -Lines: 2 -POWER_SUPPLY_NAME=AC -POWER_SUPPLY_ONLINE=0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply/BAT0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/alarm -Lines: 1 -2503000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/capacity -Lines: 1 -98 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/capacity_level -Lines: 1 -Normal -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/charge_start_threshold -Lines: 1 -95 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/charge_stop_threshold -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/cycle_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_full -Lines: 1 -50060000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_full_design -Lines: 1 -47520000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/energy_now -Lines: 1 -49450000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/manufacturer -Lines: 1 -LGC -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/model_name -Lines: 1 -LNV-45N1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/power_now -Lines: 1 -4830000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/present -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/power_supply/BAT0/serial_number -Lines: 1 -38109 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/status -Lines: 1 -Discharging -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/technology -Lines: 1 -Li-ion -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/type -Lines: 1 -Battery -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/uevent -Lines: 16 -POWER_SUPPLY_NAME=BAT0 -POWER_SUPPLY_STATUS=Discharging -POWER_SUPPLY_PRESENT=1 -POWER_SUPPLY_TECHNOLOGY=Li-ion -POWER_SUPPLY_CYCLE_COUNT=0 -POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 -POWER_SUPPLY_VOLTAGE_NOW=12229000 -POWER_SUPPLY_POWER_NOW=4830000 -POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 -POWER_SUPPLY_ENERGY_FULL=50060000 -POWER_SUPPLY_ENERGY_NOW=49450000 -POWER_SUPPLY_CAPACITY=98 -POWER_SUPPLY_CAPACITY_LEVEL=Normal -POWER_SUPPLY_MODEL_NAME=LNV-45N1 -POWER_SUPPLY_MANUFACTURER=LGC -POWER_SUPPLY_SERIAL_NUMBER=38109 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/voltage_min_design -Lines: 1 -10800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0/voltage_now -Lines: 1 -12229000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/temp -Lines: 1 -49925 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/type -Lines: 1 -bcm2835_thermal -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/mode -Lines: 1 -enabled -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/passive -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/temp -Lines: 1 -44000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/type -Lines: 1 -acpitz -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/devices/pci0000:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 
-Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq -SymlinkTo: ../cpufreq/policy0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq -Lines: 1 -1200195 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency -Lines: 1 -4294967295 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus -Lines: 1 -1 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors -Lines: 1 -performance powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver -Lines: 1 -intel_pstate -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor -Lines: 1 -powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed -Lines: 1 - -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq -Lines: 1 -2400000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq -Lines: 1 -800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors -Lines: 1 -performance powersave -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq -Lines: 1 -1219917 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver -Lines: 1 -intel_pstate -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor -Lines: 1 -powersave -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq -Lines: 1 -2400000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq -Lines: 1 -800000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed -Lines: 1 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits 
-Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us -Lines: 1 -1305 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sda1/stats/stats -Lines: 1 -extent_alloc 1 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sdb1/stats/stats -Lines: 1 -extent_alloc 2 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod deleted file mode 100644 index 8a1b839fd..000000000 --- a/vendor/github.com/prometheus/procfs/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/prometheus/procfs - -require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum deleted file mode 100644 index 7827dd3d5..000000000 --- a/vendor/github.com/prometheus/procfs/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar deleted file mode 100755 index b0171a12b..000000000 --- a/vendor/github.com/prometheus/procfs/ttar +++ /dev/null @@ -1,389 +0,0 @@ -#!/usr/bin/env bash - -# Purpose: plain text tar format -# Limitations: - only suitable for text files, directories, and symlinks -# - stores only filename, content, and mode -# - not designed for untrusted input -# -# Note: must work with bash version 3.2 (macOS) - -# Copyright 2017 Roger Luethi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -o nounset - -# Sanitize environment (for instance, standard sorting of glob matches) -export LC_ALL=C - -path="" -CMD="" -ARG_STRING="$*" - -#------------------------------------------------------------------------------ -# Not all sed implementations can work on null bytes. In order to make ttar -# work out of the box on macOS, use Python as a stream editor. - -USE_PYTHON=0 - -PYTHON_CREATE_FILTER=$(cat << 'PCF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'EOF', r'\EOF', line) - line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) - line = re.sub('\x00', r'NULLBYTE', line) - sys.stdout.write(line) -PCF -) - -PYTHON_EXTRACT_FILTER=$(cat << 'PEF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'(?/dev/null; then - echo "ERROR Python not found. Aborting." - exit 2 - fi - USE_PYTHON=1 - fi -} - -#------------------------------------------------------------------------------ - -function usage { - bname=$(basename "$0") - cat << USAGE -Usage: $bname [-C

] -c -f (create archive) - $bname -t -f (list archive contents) - $bname [-C ] -x -f (extract archive) - -Options: - -C (change directory) - -v (verbose) - -Example: Change to sysfs directory, create ttar file from fixtures directory - $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ -USAGE -exit "$1" -} - -function vecho { - if [ "${VERBOSE:-}" == "yes" ]; then - echo >&7 "$@" - fi -} - -function set_cmd { - if [ -n "$CMD" ]; then - echo "ERROR: more than one command given" - echo - usage 2 - fi - CMD=$1 -} - -unset VERBOSE - -while getopts :cf:htxvC: opt; do - case $opt in - c) - set_cmd "create" - ;; - f) - ARCHIVE=$OPTARG - ;; - h) - usage 0 - ;; - t) - set_cmd "list" - ;; - x) - set_cmd "extract" - ;; - v) - VERBOSE=yes - exec 7>&1 - ;; - C) - CDIR=$OPTARG - ;; - *) - echo >&2 "ERROR: invalid option -$OPTARG" - echo - usage 1 - ;; - esac -done - -# Remove processed options from arguments -shift $(( OPTIND - 1 )); - -if [ "${CMD:-}" == "" ]; then - echo >&2 "ERROR: no command given" - echo - usage 1 -elif [ "${ARCHIVE:-}" == "" ]; then - echo >&2 "ERROR: no archive name given" - echo - usage 1 -fi - -function list { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while read -r line; do - line_no=$(( line_no + 1 )) - if [ $size -gt 0 ]; then - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - echo "$path" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - echo "$path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - echo "$path -> ${BASH_REMATCH[1]}" - fi - done < "$ttar_file" -} - -function extract { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while IFS= read -r line; do - line_no=$(( line_no + 1 )) - local eof_without_newline - if [ "$size" -gt 0 ]; then - if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceeded by a backslash indicates that the line - # does not end with a newline - eof_without_newline=1 - else - eof_without_newline=0 - fi - # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceeded by backslash - # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceeded by backslash - # Remove one backslash in front of EOF - if [ $USE_PYTHON -eq 1 ]; then - echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" - else - # The repeated pattern makes up for sed's lack of negative - # lookbehind assertions (for consecutive null bytes). - echo -n "$line" | \ - sed -e 's/^NULLBYTE/\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\\NULLBYTE/NULLBYTE/g; - s/\([^\\]\)EOF/\1/g; - s/\\EOF/EOF/g; - ' >> "$path" - fi - if [[ "$eof_without_newline" -eq 0 ]]; then - echo >> "$path" - fi - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - if [ -e "$path" ] || [ -L "$path" ]; then - rm "$path" - fi - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - # Create file even if it is zero-length. 
- touch "$path" - vecho " $path" - elif [[ $line =~ ^Mode:\ (.*)$ ]]; then - mode=${BASH_REMATCH[1]} - chmod "$mode" "$path" - vecho "$mode" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - mkdir -p "$path" - vecho " $path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - ln -s "${BASH_REMATCH[1]}" "$path" - vecho " $path -> ${BASH_REMATCH[1]}" - elif [[ $line =~ ^# ]]; then - # Ignore comments between files - continue - else - echo >&2 "ERROR: Unknown keyword on line $line_no: $line" - exit 1 - fi - done < "$ttar_file" -} - -function div { - echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ - "- - - - - -" -} - -function get_mode { - local mfile=$1 - if [ -z "${STAT_OPTION:-}" ]; then - if stat -c '%a' "$mfile" >/dev/null 2>&1; then - # GNU stat - STAT_OPTION='-c' - STAT_FORMAT='%a' - else - # BSD stat - STAT_OPTION='-f' - # Octal output, user/group/other (omit file type, sticky bit) - STAT_FORMAT='%OLp' - fi - fi - stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" -} - -function _create { - shopt -s nullglob - local mode - local eof_without_newline - while (( "$#" )); do - file=$1 - if [ -L "$file" ]; then - echo "Path: $file" - symlinkTo=$(readlink "$file") - echo "SymlinkTo: $symlinkTo" - vecho " $file -> $symlinkTo" - div - elif [ -d "$file" ]; then - # Strip trailing slash (if there is one) - file=${file%/} - echo "Directory: $file" - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file/" - div - # Find all files and dirs, including hidden/dot files - for x in "$file/"{*,.[^.]*}; do - _create "$x" - done - elif [ -f "$file" ]; then - echo "Path: $file" - lines=$(wc -l "$file"|awk '{print $1}') - eof_without_newline=0 - if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ - [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then - eof_without_newline=1 - lines=$((lines+1)) - fi - echo "Lines: $lines" - # Add backslash in front of EOF - # Add backslash in front of NULLBYTE - # Replace null byte with NULLBYTE - if [ $USE_PYTHON -eq 1 ]; then - < "$file" python -c "$PYTHON_CREATE_FILTER" - else - < "$file" \ - sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; - ' - fi - if [[ "$eof_without_newline" -eq 1 ]]; then - # Finish line with EOF to indicate that the original line did - # not end with a linefeed - echo "EOF" - fi - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file" - div - else - echo >&2 "ERROR: file not found ($file in $(pwd))" - exit 2 - fi - shift - done -} - -function create { - ttar_file=$1 - shift - if [ -z "${1:-}" ]; then - echo >&2 "ERROR: missing arguments." - echo - usage 1 - fi - if [ -e "$ttar_file" ]; then - rm "$ttar_file" - fi - exec > "$ttar_file" - echo "# Archive created by ttar $ARG_STRING" - _create "$@" -} - -test_environment - -if [ -n "${CDIR:-}" ]; then - if [[ "$ARCHIVE" != /* ]]; then - # Relative path: preserve the archive's location before changing - # directory - ARCHIVE="$(pwd)/$ARCHIVE" - fi - cd "$CDIR" -fi - -"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/rogpeppe/go-internal/LICENSE b/vendor/github.com/rogpeppe/go-internal/LICENSE new file mode 100644 index 000000000..49ea0f928 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2018 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go b/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go new file mode 100644 index 000000000..c94b3848a --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go @@ -0,0 +1,47 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: Figure out what gopkg.in should do. + +package modfile + +import "strings" + +// ParseGopkgIn splits gopkg.in import paths into their constituent parts +func ParseGopkgIn(path string) (root, repo, major, subdir string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return + } + f := strings.Split(path, "/") + if len(f) >= 2 { + if elem, v, ok := dotV(f[1]); ok { + root = strings.Join(f[:2], "/") + repo = "github.com/go-" + elem + "/" + elem + major = v + subdir = strings.Join(f[2:], "/") + return root, repo, major, subdir, true + } + } + if len(f) >= 3 { + if elem, v, ok := dotV(f[2]); ok { + root = strings.Join(f[:3], "/") + repo = "github.com/" + f[1] + "/" + elem + major = v + subdir = strings.Join(f[3:], "/") + return root, repo, major, subdir, true + } + } + return +} + +func dotV(name string) (elem, v string, ok bool) { + i := len(name) - 1 + for i >= 0 && '0' <= name[i] && name[i] <= '9' { + i-- + } + if i <= 2 || i+1 >= len(name) || name[i-1] != '.' || name[i] != 'v' || name[i+1] == '0' && len(name) != i+2 { + return "", "", false + } + return name[:i-1], name[i:], true +} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/print.go b/vendor/github.com/rogpeppe/go-internal/modfile/print.go new file mode 100644 index 000000000..7b1dd8f95 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/print.go @@ -0,0 +1,164 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package modfile implements parsing and formatting for +// go.mod files. 
+package modfile + +import ( + "bytes" + "fmt" + "strings" +) + +func Format(f *FileSyntax) []byte { + pr := &printer{} + pr.file(f) + return pr.Bytes() +} + +// A printer collects the state during printing of a file or expression. +type printer struct { + bytes.Buffer // output buffer + comment []Comment // pending end-of-line comments + margin int // left margin (indent), a number of tabs +} + +// printf prints to the buffer. +func (p *printer) printf(format string, args ...interface{}) { + fmt.Fprintf(p, format, args...) +} + +// indent returns the position on the current line, in bytes, 0-indexed. +func (p *printer) indent() int { + b := p.Bytes() + n := 0 + for n < len(b) && b[len(b)-1-n] != '\n' { + n++ + } + return n +} + +// newline ends the current line, flushing end-of-line comments. +func (p *printer) newline() { + if len(p.comment) > 0 { + p.printf(" ") + for i, com := range p.comment { + if i > 0 { + p.trim() + p.printf("\n") + for i := 0; i < p.margin; i++ { + p.printf("\t") + } + } + p.printf("%s", strings.TrimSpace(com.Token)) + } + p.comment = p.comment[:0] + } + + p.trim() + p.printf("\n") + for i := 0; i < p.margin; i++ { + p.printf("\t") + } +} + +// trim removes trailing spaces and tabs from the current line. +func (p *printer) trim() { + // Remove trailing spaces and tabs from line we're about to end. + b := p.Bytes() + n := len(b) + for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') { + n-- + } + p.Truncate(n) +} + +// file formats the given file into the print buffer. +func (p *printer) file(f *FileSyntax) { + for _, com := range f.Before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + for i, stmt := range f.Stmt { + switch x := stmt.(type) { + case *CommentBlock: + // comments already handled + p.expr(x) + + default: + p.expr(x) + p.newline() + } + + for _, com := range stmt.Comment().After { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + if i+1 < len(f.Stmt) { + p.newline() + } + } +} + +func (p *printer) expr(x Expr) { + // Emit line-comments preceding this expression. + if before := x.Comment().Before; len(before) > 0 { + // Want to print a line comment. + // Line comments must be at the current margin. + p.trim() + if p.indent() > 0 { + // There's other text on the line. Start a new line. + p.printf("\n") + } + // Re-indent to margin. + for i := 0; i < p.margin; i++ { + p.printf("\t") + } + for _, com := range before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + } + + switch x := x.(type) { + default: + panic(fmt.Errorf("printer: unexpected type %T", x)) + + case *CommentBlock: + // done + + case *LParen: + p.printf("(") + case *RParen: + p.printf(")") + + case *Line: + sep := "" + for _, tok := range x.Token { + p.printf("%s%s", sep, tok) + sep = " " + } + + case *LineBlock: + for _, tok := range x.Token { + p.printf("%s ", tok) + } + p.expr(&x.LParen) + p.margin++ + for _, l := range x.Line { + p.newline() + p.expr(l) + } + p.margin-- + p.newline() + p.expr(&x.RParen) + } + + // Queue end-of-line comments for printing when we + // reach the end of the line. + p.comment = append(p.comment, x.Comment().Suffix...) +} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/read.go b/vendor/github.com/rogpeppe/go-internal/modfile/read.go new file mode 100644 index 000000000..1d81ff1ab --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/read.go @@ -0,0 +1,869 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Module file parser. +// This is a simplified copy of Google's buildifier parser. + +package modfile + +import ( + "bytes" + "fmt" + "os" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A Position describes the position between two bytes of input. +type Position struct { + Line int // line in input (starting at 1) + LineRune int // rune in line (starting at 1) + Byte int // byte in input (starting at 0) +} + +// add returns the position at the end of s, assuming it starts at p. +func (p Position) add(s string) Position { + p.Byte += len(s) + if n := strings.Count(s, "\n"); n > 0 { + p.Line += n + s = s[strings.LastIndex(s, "\n")+1:] + p.LineRune = 1 + } + p.LineRune += utf8.RuneCountInString(s) + return p +} + +// An Expr represents an input element. +type Expr interface { + // Span returns the start and end position of the expression, + // excluding leading or trailing comments. + Span() (start, end Position) + + // Comment returns the comments attached to the expression. + // This method would normally be named 'Comments' but that + // would interfere with embedding a type of the same name. + Comment() *Comments +} + +// A Comment represents a single // comment. +type Comment struct { + Start Position + Token string // without trailing newline + Suffix bool // an end of line (not whole line) comment +} + +// Comments collects the comments associated with an expression. +type Comments struct { + Before []Comment // whole-line comments before this expression + Suffix []Comment // end-of-line comments after this expression + + // For top-level expressions only, After lists whole-line + // comments following the expression. + After []Comment +} + +// Comment returns the receiver. This isn't useful by itself, but +// a Comments struct is embedded into all the expression +// implementation types, and this gives each of those a Comment +// method to satisfy the Expr interface. +func (c *Comments) Comment() *Comments { + return c +} + +// A FileSyntax represents an entire go.mod file. +type FileSyntax struct { + Name string // file path + Comments + Stmt []Expr +} + +func (x *FileSyntax) Span() (start, end Position) { + if len(x.Stmt) == 0 { + return + } + start, _ = x.Stmt[0].Span() + _, end = x.Stmt[len(x.Stmt)-1].Span() + return start, end +} + +func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line { + if hint == nil { + // If no hint given, add to the last statement of the given type. + Loop: + for i := len(x.Stmt) - 1; i >= 0; i-- { + stmt := x.Stmt[i] + switch stmt := stmt.(type) { + case *Line: + if stmt.Token != nil && stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + case *LineBlock: + if stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + } + } + } + + if hint != nil { + for i, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt == hint { + // Convert line to line block. + stmt.InBlock = true + block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}} + stmt.Token = stmt.Token[1:] + x.Stmt[i] = block + new := &Line{Token: tokens[1:], InBlock: true} + block.Line = append(block.Line, new) + return new + } + case *LineBlock: + if stmt == hint { + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line = append(stmt.Line, new) + return new + } + for j, line := range stmt.Line { + if line == hint { + // Add new line after hint. 
+ stmt.Line = append(stmt.Line, nil) + copy(stmt.Line[j+2:], stmt.Line[j+1:]) + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line[j+1] = new + return new + } + } + } + } + } + + new := &Line{Token: tokens} + x.Stmt = append(x.Stmt, new) + return new +} + +func (x *FileSyntax) updateLine(line *Line, tokens ...string) { + if line.InBlock { + tokens = tokens[1:] + } + line.Token = tokens +} + +func (x *FileSyntax) removeLine(line *Line) { + line.Token = nil +} + +// Cleanup cleans up the file syntax x after any edit operations. +// To avoid quadratic behavior, removeLine marks the line as dead +// by setting line.Token = nil but does not remove it from the slice +// in which it appears. After edits have all been indicated, +// calling Cleanup cleans out the dead lines. +func (x *FileSyntax) Cleanup() { + w := 0 + for _, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt.Token == nil { + continue + } + case *LineBlock: + ww := 0 + for _, line := range stmt.Line { + if line.Token != nil { + stmt.Line[ww] = line + ww++ + } + } + if ww == 0 { + continue + } + if ww == 1 { + // Collapse block into single line. + line := &Line{ + Comments: Comments{ + Before: commentsAdd(stmt.Before, stmt.Line[0].Before), + Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), + After: commentsAdd(stmt.Line[0].After, stmt.After), + }, + Token: stringsAdd(stmt.Token, stmt.Line[0].Token), + } + x.Stmt[w] = line + w++ + continue + } + stmt.Line = stmt.Line[:ww] + } + x.Stmt[w] = stmt + w++ + } + x.Stmt = x.Stmt[:w] +} + +func commentsAdd(x, y []Comment) []Comment { + return append(x[:len(x):len(x)], y...) +} + +func stringsAdd(x, y []string) []string { + return append(x[:len(x):len(x)], y...) +} + +// A CommentBlock represents a top-level block of comments separate +// from any rule. +type CommentBlock struct { + Comments + Start Position +} + +func (x *CommentBlock) Span() (start, end Position) { + return x.Start, x.Start +} + +// A Line is a single line of tokens. +type Line struct { + Comments + Start Position + Token []string + InBlock bool + End Position +} + +func (x *Line) Span() (start, end Position) { + return x.Start, x.End +} + +// A LineBlock is a factored block of lines, like +// +// require ( +// "x" +// "y" +// ) +// +type LineBlock struct { + Comments + Start Position + LParen LParen + Token []string + Line []*Line + RParen RParen +} + +func (x *LineBlock) Span() (start, end Position) { + return x.Start, x.RParen.Pos.add(")") +} + +// An LParen represents the beginning of a parenthesized line block. +// It is a place to store suffix comments. +type LParen struct { + Comments + Pos Position +} + +func (x *LParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An RParen represents the end of a parenthesized line block. +// It is a place to store whole-line (before) comments. +type RParen struct { + Comments + Pos Position +} + +func (x *RParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An input represents a single input file being parsed. +type input struct { + // Lexing state. + filename string // name of input file, for errors + complete []byte // entire input + remaining []byte // remaining input + token []byte // token being scanned + lastToken string // most recently returned token, for error messages + pos Position // current input position + comments []Comment // accumulated comments + endRule int // position of end of current rule + + // Parser state. 
+ file *FileSyntax // returned top-level syntax tree + parseError error // error encountered during parsing + + // Comment assignment state. + pre []Expr // all expressions, in preorder traversal + post []Expr // all expressions, in postorder traversal +} + +func newInput(filename string, data []byte) *input { + return &input{ + filename: filename, + complete: data, + remaining: data, + pos: Position{Line: 1, LineRune: 1, Byte: 0}, + } +} + +// parse parses the input file. +func parse(file string, data []byte) (f *FileSyntax, err error) { + in := newInput(file, data) + // The parser panics for both routine errors like syntax errors + // and for programmer bugs like array index errors. + // Turn both into error returns. Catching bug panics is + // especially important when processing many files. + defer func() { + if e := recover(); e != nil { + if e == in.parseError { + err = in.parseError + } else { + err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e) + } + } + }() + + // Invoke the parser. + in.parseFile() + if in.parseError != nil { + return nil, in.parseError + } + in.file.Name = in.filename + + // Assign comments to nearby syntax. + in.assignComments() + + return in.file, nil +} + +// Error is called to report an error. +// The reason s is often "syntax error". +// Error does not return: it panics. +func (in *input) Error(s string) { + if s == "syntax error" && in.lastToken != "" { + s += " near " + in.lastToken + } + in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, s) + panic(in.parseError) +} + +// eof reports whether the input has reached end of file. +func (in *input) eof() bool { + return len(in.remaining) == 0 +} + +// peekRune returns the next rune in the input without consuming it. +func (in *input) peekRune() int { + if len(in.remaining) == 0 { + return 0 + } + r, _ := utf8.DecodeRune(in.remaining) + return int(r) +} + +// peekPrefix reports whether the remaining input begins with the given prefix. +func (in *input) peekPrefix(prefix string) bool { + // This is like bytes.HasPrefix(in.remaining, []byte(prefix)) + // but without the allocation of the []byte copy of prefix. + for i := 0; i < len(prefix); i++ { + if i >= len(in.remaining) || in.remaining[i] != prefix[i] { + return false + } + } + return true +} + +// readRune consumes and returns the next rune in the input. +func (in *input) readRune() int { + if len(in.remaining) == 0 { + in.Error("internal lexer error: readRune at EOF") + } + r, size := utf8.DecodeRune(in.remaining) + in.remaining = in.remaining[size:] + if r == '\n' { + in.pos.Line++ + in.pos.LineRune = 1 + } else { + in.pos.LineRune++ + } + in.pos.Byte += size + return int(r) +} + +type symType struct { + pos Position + endPos Position + text string +} + +// startToken marks the beginning of the next input token. +// It must be followed by a call to endToken, once the token has +// been consumed using readRune. +func (in *input) startToken(sym *symType) { + in.token = in.remaining + sym.text = "" + sym.pos = in.pos +} + +// endToken marks the end of an input token. +// It records the actual token string in sym.text if the caller +// has not done that already. +func (in *input) endToken(sym *symType) { + if sym.text == "" { + tok := string(in.token[:len(in.token)-len(in.remaining)]) + sym.text = tok + in.lastToken = sym.text + } + sym.endPos = in.pos +} + +// lex is called from the parser to obtain the next input token. 
+// It returns the token value (either a rune like '+' or a symbolic token _FOR) +// and sets val to the data associated with the token. +// For all our input tokens, the associated data is +// val.Pos (the position where the token begins) +// and val.Token (the input string corresponding to the token). +func (in *input) lex(sym *symType) int { + // Skip past spaces, stopping at non-space or EOF. + countNL := 0 // number of newlines we've skipped past + for !in.eof() { + // Skip over spaces. Count newlines so we can give the parser + // information about where top-level blank lines are, + // for top-level comment assignment. + c := in.peekRune() + if c == ' ' || c == '\t' || c == '\r' { + in.readRune() + continue + } + + // Comment runs to end of line. + if in.peekPrefix("//") { + in.startToken(sym) + + // Is this comment the only thing on its line? + // Find the last \n before this // and see if it's all + // spaces from there to here. + i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n")) + suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0 + in.readRune() + in.readRune() + + // Consume comment. + for len(in.remaining) > 0 && in.readRune() != '\n' { + } + in.endToken(sym) + + sym.text = strings.TrimRight(sym.text, "\n") + in.lastToken = "comment" + + // If we are at top level (not in a statement), hand the comment to + // the parser as a _COMMENT token. The grammar is written + // to handle top-level comments itself. + if !suffix { + // Not in a statement. Tell parser about top-level comment. + return _COMMENT + } + + // Otherwise, save comment for later attachment to syntax tree. + if countNL > 1 { + in.comments = append(in.comments, Comment{sym.pos, "", false}) + } + in.comments = append(in.comments, Comment{sym.pos, sym.text, suffix}) + countNL = 1 + return _EOL + } + + if in.peekPrefix("/*") { + in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)")) + } + + // Found non-space non-comment. + break + } + + // Found the beginning of the next token. + in.startToken(sym) + defer in.endToken(sym) + + // End of file. + if in.eof() { + in.lastToken = "EOF" + return _EOF + } + + // Punctuation tokens. + switch c := in.peekRune(); c { + case '\n': + in.readRune() + return c + + case '(': + in.readRune() + return c + + case ')': + in.readRune() + return c + + case '"', '`': // quoted string + quote := c + in.readRune() + for { + if in.eof() { + in.pos = sym.pos + in.Error("unexpected EOF in string") + } + if in.peekRune() == '\n' { + in.Error("unexpected newline in string") + } + c := in.readRune() + if c == quote { + break + } + if c == '\\' && quote != '`' { + if in.eof() { + in.pos = sym.pos + in.Error("unexpected EOF in string") + } + in.readRune() + } + } + in.endToken(sym) + return _STRING + } + + // Checked all punctuation. Must be identifier token. + if c := in.peekRune(); !isIdent(c) { + in.Error(fmt.Sprintf("unexpected input character %#q", c)) + } + + // Scan over identifier. + for isIdent(in.peekRune()) { + if in.peekPrefix("//") { + break + } + if in.peekPrefix("/*") { + in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)")) + } + in.readRune() + } + return _IDENT +} + +// isIdent reports whether c is an identifier rune. +// We treat nearly all runes as identifier runes. +func isIdent(c int) bool { + return c != 0 && !unicode.IsSpace(rune(c)) +} + +// Comment assignment. +// We build two lists of all subexpressions, preorder and postorder. 
+// The preorder list is ordered by start location, with outer expressions first. +// The postorder list is ordered by end location, with outer expressions last. +// We use the preorder list to assign each whole-line comment to the syntax +// immediately following it, and we use the postorder list to assign each +// end-of-line comment to the syntax immediately preceding it. + +// order walks the expression adding it and its subexpressions to the +// preorder and postorder lists. +func (in *input) order(x Expr) { + if x != nil { + in.pre = append(in.pre, x) + } + switch x := x.(type) { + default: + panic(fmt.Errorf("order: unexpected type %T", x)) + case nil: + // nothing + case *LParen, *RParen: + // nothing + case *CommentBlock: + // nothing + case *Line: + // nothing + case *FileSyntax: + for _, stmt := range x.Stmt { + in.order(stmt) + } + case *LineBlock: + in.order(&x.LParen) + for _, l := range x.Line { + in.order(l) + } + in.order(&x.RParen) + } + if x != nil { + in.post = append(in.post, x) + } +} + +// assignComments attaches comments to nearby syntax. +func (in *input) assignComments() { + const debug = false + + // Generate preorder and postorder lists. + in.order(in.file) + + // Split into whole-line comments and suffix comments. + var line, suffix []Comment + for _, com := range in.comments { + if com.Suffix { + suffix = append(suffix, com) + } else { + line = append(line, com) + } + } + + if debug { + for _, c := range line { + fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign line comments to syntax immediately following. + for _, x := range in.pre { + start, _ := x.Span() + if debug { + fmt.Printf("pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte) + } + xcom := x.Comment() + for len(line) > 0 && start.Byte >= line[0].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte) + } + xcom.Before = append(xcom.Before, line[0]) + line = line[1:] + } + } + + // Remaining line comments go at end of file. + in.file.After = append(in.file.After, line...) + + if debug { + for _, c := range suffix { + fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign suffix comments to syntax immediately before. + for i := len(in.post) - 1; i >= 0; i-- { + x := in.post[i] + + start, end := x.Span() + if debug { + fmt.Printf("post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte) + } + + // Do not assign suffix comments to end of line block or whole file. + // Instead assign them to the last element inside. + switch x.(type) { + case *FileSyntax: + continue + } + + // Do not assign suffix comments to something that starts + // on an earlier line, so that in + // + // x ( y + // z ) // comment + // + // we assign the comment to z and not to x ( ... ). + if start.Line != end.Line { + continue + } + xcom := x.Comment() + for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte) + } + xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1]) + suffix = suffix[:len(suffix)-1] + } + } + + // We assigned suffix comments in reverse. + // If multiple suffix comments were appended to the same + // expression node, they are now in reverse. Fix that. 
+ for _, x := range in.post { + reverseComments(x.Comment().Suffix) + } + + // Remaining suffix comments go at beginning of file. + in.file.Before = append(in.file.Before, suffix...) +} + +// reverseComments reverses the []Comment list. +func reverseComments(list []Comment) { + for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { + list[i], list[j] = list[j], list[i] + } +} + +func (in *input) parseFile() { + in.file = new(FileSyntax) + var sym symType + var cb *CommentBlock + for { + tok := in.lex(&sym) + switch tok { + case '\n': + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + cb = nil + } + case _COMMENT: + if cb == nil { + cb = &CommentBlock{Start: sym.pos} + } + com := cb.Comment() + com.Before = append(com.Before, Comment{Start: sym.pos, Token: sym.text}) + case _EOF: + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + } + return + default: + in.parseStmt(&sym) + if cb != nil { + in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before + cb = nil + } + } + } +} + +func (in *input) parseStmt(sym *symType) { + start := sym.pos + end := sym.endPos + token := []string{sym.text} + for { + tok := in.lex(sym) + switch tok { + case '\n', _EOF, _EOL: + in.file.Stmt = append(in.file.Stmt, &Line{ + Start: start, + Token: token, + End: end, + }) + return + case '(': + in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, token, sym)) + return + default: + token = append(token, sym.text) + end = sym.endPos + } + } +} + +func (in *input) parseLineBlock(start Position, token []string, sym *symType) *LineBlock { + x := &LineBlock{ + Start: start, + Token: token, + LParen: LParen{Pos: sym.pos}, + } + var comments []Comment + for { + tok := in.lex(sym) + switch tok { + case _EOL: + // ignore + case '\n': + if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" { + comments = append(comments, Comment{}) + } + case _COMMENT: + comments = append(comments, Comment{Start: sym.pos, Token: sym.text}) + case _EOF: + in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune)) + case ')': + x.RParen.Before = comments + x.RParen.Pos = sym.pos + tok = in.lex(sym) + if tok != '\n' && tok != _EOF && tok != _EOL { + in.Error("syntax error (expected newline after closing paren)") + } + return x + default: + l := in.parseLine(sym) + x.Line = append(x.Line, l) + l.Comment().Before = comments + comments = nil + } + } +} + +func (in *input) parseLine(sym *symType) *Line { + start := sym.pos + end := sym.endPos + token := []string{sym.text} + for { + tok := in.lex(sym) + switch tok { + case '\n', _EOF, _EOL: + return &Line{ + Start: start, + Token: token, + End: end, + InBlock: true, + } + default: + token = append(token, sym.text) + end = sym.endPos + } + } +} + +const ( + _EOF = -(1 + iota) + _EOL + _IDENT + _STRING + _COMMENT +) + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// ModulePath returns the module path from the gomod file text. +// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. 
+func ModulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/rule.go b/vendor/github.com/rogpeppe/go-internal/modfile/rule.go new file mode 100644 index 000000000..24d275f12 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/rule.go @@ -0,0 +1,724 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfile + +import ( + "bytes" + "errors" + "fmt" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "unicode" + + "github.com/rogpeppe/go-internal/module" + "github.com/rogpeppe/go-internal/semver" +) + +// A File is the parsed, interpreted form of a go.mod file. +type File struct { + Module *Module + Go *Go + Require []*Require + Exclude []*Exclude + Replace []*Replace + + Syntax *FileSyntax +} + +// A Module is the module statement. +type Module struct { + Mod module.Version + Syntax *Line +} + +// A Go is the go statement. +type Go struct { + Version string // "1.23" + Syntax *Line +} + +// A Require is a single require statement. +type Require struct { + Mod module.Version + Indirect bool // has "// indirect" comment + Syntax *Line +} + +// An Exclude is a single exclude statement. +type Exclude struct { + Mod module.Version + Syntax *Line +} + +// A Replace is a single replace statement. +type Replace struct { + Old module.Version + New module.Version + Syntax *Line +} + +func (f *File) AddModuleStmt(path string) error { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + if f.Module == nil { + f.Module = &Module{ + Mod: module.Version{Path: path}, + Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)), + } + } else { + f.Module.Mod.Path = path + f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path)) + } + return nil +} + +func (f *File) AddComment(text string) { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{ + Comments: Comments{ + Before: []Comment{ + { + Token: text, + }, + }, + }, + }) +} + +type VersionFixer func(path, version string) (string, error) + +// Parse parses the data, reported in errors as being from file, +// into a File struct. It applies fix, if non-nil, to canonicalize all module versions found. +func Parse(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, true) +} + +// ParseLax is like Parse but ignores unknown statements. +// It is used when parsing go.mod files other than the main module, +// under the theory that most statement types we add in the future will +// only apply in the main module, like exclude and replace, +// and so we get better gradual deployments if old go commands +// simply ignore those statements when found in go.mod files +// in dependencies. 
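A small, hypothetical sketch of ModulePath defined above, which scans the raw go.mod text for the module line without building a syntax tree; the go.mod contents here are invented for illustration:

package main

import (
	"fmt"

	"github.com/rogpeppe/go-internal/modfile"
)

func main() {
	gomod := []byte("// example deps\nmodule example.com/hello\n\nrequire rsc.io/quote v1.5.2\n")
	// Prints "example.com/hello"; comments and later statements are ignored.
	fmt.Println(modfile.ModulePath(gomod))
}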
+func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, false) +} + +func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File, error) { + fs, err := parse(file, data) + if err != nil { + return nil, err + } + f := &File{ + Syntax: fs, + } + + var errs bytes.Buffer + for _, x := range fs.Stmt { + switch x := x.(type) { + case *Line: + f.add(&errs, x, x.Token[0], x.Token[1:], fix, strict) + + case *LineBlock: + if len(x.Token) > 1 { + if strict { + fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " ")) + } + continue + } + switch x.Token[0] { + default: + if strict { + fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " ")) + } + continue + case "module", "require", "exclude", "replace": + for _, l := range x.Line { + f.add(&errs, l, x.Token[0], l.Token, fix, strict) + } + } + } + } + + if errs.Len() > 0 { + return nil, errors.New(strings.TrimRight(errs.String(), "\n")) + } + return f, nil +} + +var goVersionRE = regexp.MustCompile(`([1-9][0-9]*)\.(0|[1-9][0-9]*)`) + +func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, fix VersionFixer, strict bool) { + // If strict is false, this module is a dependency. + // We ignore all unknown directives as well as main-module-only + // directives like replace and exclude. It will work better for + // forward compatibility if we can depend on modules that have unknown + // statements (presumed relevant only when acting as the main module) + // and simply ignore those statements. + if !strict { + switch verb { + case "module", "require", "go": + // want these even for dependency go.mods + default: + return + } + } + + switch verb { + default: + fmt.Fprintf(errs, "%s:%d: unknown directive: %s\n", f.Syntax.Name, line.Start.Line, verb) + + case "go": + if f.Go != nil { + fmt.Fprintf(errs, "%s:%d: repeated go statement\n", f.Syntax.Name, line.Start.Line) + return + } + if len(args) != 1 || !goVersionRE.MatchString(args[0]) { + fmt.Fprintf(errs, "%s:%d: usage: go 1.23\n", f.Syntax.Name, line.Start.Line) + return + } + f.Go = &Go{Syntax: line} + f.Go.Version = args[0] + case "module": + if f.Module != nil { + fmt.Fprintf(errs, "%s:%d: repeated module statement\n", f.Syntax.Name, line.Start.Line) + return + } + f.Module = &Module{Syntax: line} + if len(args) != 1 { + + fmt.Fprintf(errs, "%s:%d: usage: module module/path [version]\n", f.Syntax.Name, line.Start.Line) + return + } + s, err := parseString(&args[0]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + f.Module.Mod = module.Version{Path: s} + case "require", "exclude": + if len(args) != 2 { + fmt.Fprintf(errs, "%s:%d: usage: %s module/path v1.2.3\n", f.Syntax.Name, line.Start.Line, verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + old := args[1] + v, err := parseVersion(s, &args[1], fix) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid module version %q: %v\n", f.Syntax.Name, line.Start.Line, old, err) + return + } + pathMajor, err := modulePathMajor(s) + if err != nil { + fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + if !module.MatchPathMajor(v, pathMajor) { + if pathMajor == "" { + pathMajor = "v0 or v1" + } + fmt.Fprintf(errs, "%s:%d: invalid module: %s 
should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v) + return + } + if verb == "require" { + f.Require = append(f.Require, &Require{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + Indirect: isIndirect(line), + }) + } else { + f.Exclude = append(f.Exclude, &Exclude{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + }) + } + case "replace": + arrow := 2 + if len(args) >= 2 && args[1] == "=>" { + arrow = 1 + } + if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" { + fmt.Fprintf(errs, "%s:%d: usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory\n", f.Syntax.Name, line.Start.Line, verb, verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + pathMajor, err := modulePathMajor(s) + if err != nil { + fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + var v string + if arrow == 2 { + old := args[1] + v, err = parseVersion(s, &args[1], fix) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err) + return + } + if !module.MatchPathMajor(v, pathMajor) { + if pathMajor == "" { + pathMajor = "v0 or v1" + } + fmt.Fprintf(errs, "%s:%d: invalid module: %s should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v) + return + } + } + ns, err := parseString(&args[arrow+1]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + nv := "" + if len(args) == arrow+2 { + if !IsDirectoryPath(ns) { + fmt.Fprintf(errs, "%s:%d: replacement module without version must be directory path (rooted or starting with ./ or ../)\n", f.Syntax.Name, line.Start.Line) + return + } + if filepath.Separator == '/' && strings.Contains(ns, `\`) { + fmt.Fprintf(errs, "%s:%d: replacement directory appears to be Windows path (on a non-windows system)\n", f.Syntax.Name, line.Start.Line) + return + } + } + if len(args) == arrow+3 { + old := args[arrow+1] + nv, err = parseVersion(ns, &args[arrow+2], fix) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err) + return + } + if IsDirectoryPath(ns) { + fmt.Fprintf(errs, "%s:%d: replacement module directory path %q cannot have version\n", f.Syntax.Name, line.Start.Line, ns) + return + } + } + f.Replace = append(f.Replace, &Replace{ + Old: module.Version{Path: s, Version: v}, + New: module.Version{Path: ns, Version: nv}, + Syntax: line, + }) + } +} + +// isIndirect reports whether line has a "// indirect" comment, +// meaning it is in go.mod only for its effect on indirect dependencies, +// so that it can be dropped entirely once the effective version of the +// indirect dependency reaches the given minimum version. +func isIndirect(line *Line) bool { + if len(line.Suffix) == 0 { + return false + } + f := strings.Fields(line.Suffix[0].Token) + return (len(f) == 2 && f[1] == "indirect" || len(f) > 2 && f[1] == "indirect;") && f[0] == "//" +} + +// setIndirect sets line to have (or not have) a "// indirect" comment. +func setIndirect(line *Line, indirect bool) { + if isIndirect(line) == indirect { + return + } + if indirect { + // Adding comment. + if len(line.Suffix) == 0 { + // New comment. 
+ line.Suffix = []Comment{{Token: "// indirect", Suffix: true}} + return + } + // Insert at beginning of existing comment. + com := &line.Suffix[0] + space := " " + if len(com.Token) > 2 && com.Token[2] == ' ' || com.Token[2] == '\t' { + space = "" + } + com.Token = "// indirect;" + space + com.Token[2:] + return + } + + // Removing comment. + f := strings.Fields(line.Suffix[0].Token) + if len(f) == 2 { + // Remove whole comment. + line.Suffix = nil + return + } + + // Remove comment prefix. + com := &line.Suffix[0] + i := strings.Index(com.Token, "indirect;") + com.Token = "//" + com.Token[i+len("indirect;"):] +} + +// IsDirectoryPath reports whether the given path should be interpreted +// as a directory path. Just like on the go command line, relative paths +// and rooted paths are directory paths; the rest are module paths. +func IsDirectoryPath(ns string) bool { + // Because go.mod files can move from one system to another, + // we check all known path syntaxes, both Unix and Windows. + return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") || + strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) || + len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' +} + +// MustQuote reports whether s must be quoted in order to appear as +// a single token in a go.mod line. +func MustQuote(s string) bool { + for _, r := range s { + if !unicode.IsPrint(r) || r == ' ' || r == '"' || r == '\'' || r == '`' { + return true + } + } + return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*") +} + +// AutoQuote returns s or, if quoting is required for s to appear in a go.mod, +// the quotation of s. +func AutoQuote(s string) string { + if MustQuote(s) { + return strconv.Quote(s) + } + return s +} + +func parseString(s *string) (string, error) { + t := *s + if strings.HasPrefix(t, `"`) { + var err error + if t, err = strconv.Unquote(t); err != nil { + return "", err + } + } else if strings.ContainsAny(t, "\"'`") { + // Other quotes are reserved both for possible future expansion + // and to avoid confusion. For example if someone types 'x' + // we want that to be a syntax error and not a literal x in literal quotation marks. + return "", fmt.Errorf("unquoted string cannot contain quote") + } + *s = AutoQuote(t) + return t, nil +} + +func parseVersion(path string, s *string, fix VersionFixer) (string, error) { + t, err := parseString(s) + if err != nil { + return "", err + } + if fix != nil { + var err error + t, err = fix(path, t) + if err != nil { + return "", err + } + } + if v := module.CanonicalVersion(t); v != "" { + *s = v + return *s, nil + } + return "", fmt.Errorf("version must be of the form v1.2.3") +} + +func modulePathMajor(path string) (string, error) { + _, major, ok := module.SplitPathVersion(path) + if !ok { + return "", fmt.Errorf("invalid module path") + } + return major, nil +} + +func (f *File) Format() ([]byte, error) { + return Format(f.Syntax), nil +} + +// Cleanup cleans up the file f after any edit operations. +// To avoid quadratic behavior, modifications like DropRequire +// clear the entry but do not remove it from the slice. +// Cleanup cleans out all the cleared entries. 
+func (f *File) Cleanup() { + w := 0 + for _, r := range f.Require { + if r.Mod.Path != "" { + f.Require[w] = r + w++ + } + } + f.Require = f.Require[:w] + + w = 0 + for _, x := range f.Exclude { + if x.Mod.Path != "" { + f.Exclude[w] = x + w++ + } + } + f.Exclude = f.Exclude[:w] + + w = 0 + for _, r := range f.Replace { + if r.Old.Path != "" { + f.Replace[w] = r + w++ + } + } + f.Replace = f.Replace[:w] + + f.Syntax.Cleanup() +} + +func (f *File) AddRequire(path, vers string) error { + need := true + for _, r := range f.Require { + if r.Mod.Path == path { + if need { + r.Mod.Version = vers + f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers) + need = false + } else { + f.Syntax.removeLine(r.Syntax) + *r = Require{} + } + } + } + + if need { + f.AddNewRequire(path, vers, false) + } + return nil +} + +func (f *File) AddNewRequire(path, vers string, indirect bool) { + line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers) + setIndirect(line, indirect) + f.Require = append(f.Require, &Require{module.Version{Path: path, Version: vers}, indirect, line}) +} + +func (f *File) SetRequire(req []*Require) { + need := make(map[string]string) + indirect := make(map[string]bool) + for _, r := range req { + need[r.Mod.Path] = r.Mod.Version + indirect[r.Mod.Path] = r.Indirect + } + + for _, r := range f.Require { + if v, ok := need[r.Mod.Path]; ok { + r.Mod.Version = v + r.Indirect = indirect[r.Mod.Path] + } + } + + var newStmts []Expr + for _, stmt := range f.Syntax.Stmt { + switch stmt := stmt.(type) { + case *LineBlock: + if len(stmt.Token) > 0 && stmt.Token[0] == "require" { + var newLines []*Line + for _, line := range stmt.Line { + if p, err := parseString(&line.Token[0]); err == nil && need[p] != "" { + line.Token[1] = need[p] + delete(need, p) + setIndirect(line, indirect[p]) + newLines = append(newLines, line) + } + } + if len(newLines) == 0 { + continue // drop stmt + } + stmt.Line = newLines + } + + case *Line: + if len(stmt.Token) > 0 && stmt.Token[0] == "require" { + if p, err := parseString(&stmt.Token[1]); err == nil && need[p] != "" { + stmt.Token[2] = need[p] + delete(need, p) + setIndirect(stmt, indirect[p]) + } else { + continue // drop stmt + } + } + } + newStmts = append(newStmts, stmt) + } + f.Syntax.Stmt = newStmts + + for path, vers := range need { + f.AddNewRequire(path, vers, indirect[path]) + } + f.SortBlocks() +} + +func (f *File) DropRequire(path string) error { + for _, r := range f.Require { + if r.Mod.Path == path { + f.Syntax.removeLine(r.Syntax) + *r = Require{} + } + } + return nil +} + +func (f *File) AddExclude(path, vers string) error { + var hint *Line + for _, x := range f.Exclude { + if x.Mod.Path == path && x.Mod.Version == vers { + return nil + } + if x.Mod.Path == path { + hint = x.Syntax + } + } + + f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)}) + return nil +} + +func (f *File) DropExclude(path, vers string) error { + for _, x := range f.Exclude { + if x.Mod.Path == path && x.Mod.Version == vers { + f.Syntax.removeLine(x.Syntax) + *x = Exclude{} + } + } + return nil +} + +func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error { + need := true + old := module.Version{Path: oldPath, Version: oldVers} + new := module.Version{Path: newPath, Version: newVers} + tokens := []string{"replace", AutoQuote(oldPath)} + if oldVers != "" { + tokens = append(tokens, oldVers) + } + tokens = append(tokens, "=>", 
AutoQuote(newPath)) + if newVers != "" { + tokens = append(tokens, newVers) + } + + var hint *Line + for _, r := range f.Replace { + if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) { + if need { + // Found replacement for old; update to use new. + r.New = new + f.Syntax.updateLine(r.Syntax, tokens...) + need = false + continue + } + // Already added; delete other replacements for same. + f.Syntax.removeLine(r.Syntax) + *r = Replace{} + } + if r.Old.Path == oldPath { + hint = r.Syntax + } + } + if need { + f.Replace = append(f.Replace, &Replace{Old: old, New: new, Syntax: f.Syntax.addLine(hint, tokens...)}) + } + return nil +} + +func (f *File) DropReplace(oldPath, oldVers string) error { + for _, r := range f.Replace { + if r.Old.Path == oldPath && r.Old.Version == oldVers { + f.Syntax.removeLine(r.Syntax) + *r = Replace{} + } + } + return nil +} + +func (f *File) SortBlocks() { + f.removeDups() // otherwise sorting is unsafe + + for _, stmt := range f.Syntax.Stmt { + block, ok := stmt.(*LineBlock) + if !ok { + continue + } + sort.Slice(block.Line, func(i, j int) bool { + li := block.Line[i] + lj := block.Line[j] + for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { + if li.Token[k] != lj.Token[k] { + return li.Token[k] < lj.Token[k] + } + } + return len(li.Token) < len(lj.Token) + }) + } +} + +func (f *File) removeDups() { + have := make(map[module.Version]bool) + kill := make(map[*Line]bool) + for _, x := range f.Exclude { + if have[x.Mod] { + kill[x.Syntax] = true + continue + } + have[x.Mod] = true + } + var excl []*Exclude + for _, x := range f.Exclude { + if !kill[x.Syntax] { + excl = append(excl, x) + } + } + f.Exclude = excl + + have = make(map[module.Version]bool) + // Later replacements take priority over earlier ones. + for i := len(f.Replace) - 1; i >= 0; i-- { + x := f.Replace[i] + if have[x.Old] { + kill[x.Syntax] = true + continue + } + have[x.Old] = true + } + var repl []*Replace + for _, x := range f.Replace { + if !kill[x.Syntax] { + repl = append(repl, x) + } + } + f.Replace = repl + + var stmts []Expr + for _, stmt := range f.Syntax.Stmt { + switch stmt := stmt.(type) { + case *Line: + if kill[stmt] { + continue + } + case *LineBlock: + var lines []*Line + for _, line := range stmt.Line { + if !kill[line] { + lines = append(lines, line) + } + } + stmt.Line = lines + if len(lines) == 0 { + continue + } + } + stmts = append(stmts, stmt) + } + f.Syntax.Stmt = stmts +} diff --git a/vendor/github.com/rogpeppe/go-internal/module/module.go b/vendor/github.com/rogpeppe/go-internal/module/module.go new file mode 100644 index 000000000..3ff6d9bf5 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/module/module.go @@ -0,0 +1,540 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package module defines the module.Version type +// along with support code. +package module + +// IMPORTANT NOTE +// +// This file essentially defines the set of valid import paths for the go command. +// There are many subtle considerations, including Unicode ambiguity, +// security, network, and file system representations. +// +// This file also defines the set of valid module path and version combinations, +// another topic with many subtle considerations. +// +// Changes to the semantics in this file require approval from rsc. 
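The editing methods above (AddRequire, AddReplace, SortBlocks, Cleanup) operate on the retained syntax so the file can be rewritten with its comments preserved; a hedged sketch, with the module paths and the ../quote replacement directory invented for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/rogpeppe/go-internal/modfile"
)

func main() {
	f, err := modfile.Parse("go.mod", []byte("module example.com/hello\n\nrequire rsc.io/quote v1.5.2\n"), nil)
	if err != nil {
		log.Fatal(err)
	}
	// Bump an existing requirement and add a local replacement.
	if err := f.AddRequire("rsc.io/quote", "v1.5.3"); err != nil {
		log.Fatal(err)
	}
	if err := f.AddReplace("rsc.io/quote", "", "../quote", ""); err != nil {
		log.Fatal(err)
	}
	f.SortBlocks()
	f.Cleanup() // drop entries cleared by edits before formatting
	out, _ := f.Format()
	fmt.Printf("%s", out)
}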
+ +import ( + "fmt" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "github.com/rogpeppe/go-internal/semver" +) + +// A Version is defined by a module path and version pair. +type Version struct { + Path string + + // Version is usually a semantic version in canonical form. + // There are two exceptions to this general rule. + // First, the top-level target of a build has no specific version + // and uses Version = "". + // Second, during MVS calculations the version "none" is used + // to represent the decision to take no version of a given module. + Version string `json:",omitempty"` +} + +// Check checks that a given module path, version pair is valid. +// In addition to the path being a valid module path +// and the version being a valid semantic version, +// the two must correspond. +// For example, the path "yaml/v2" only corresponds to +// semantic versions beginning with "v2.". +func Check(path, version string) error { + if err := CheckPath(path); err != nil { + return err + } + if !semver.IsValid(version) { + return fmt.Errorf("malformed semantic version %v", version) + } + _, pathMajor, _ := SplitPathVersion(path) + if !MatchPathMajor(version, pathMajor) { + if pathMajor == "" { + pathMajor = "v0 or v1" + } + if pathMajor[0] == '.' { // .v1 + pathMajor = pathMajor[1:] + } + return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor) + } + return nil +} + +// firstPathOK reports whether r can appear in the first element of a module path. +// The first element of the path must be an LDH domain name, at least for now. +// To avoid case ambiguity, the domain name must be entirely lower case. +func firstPathOK(r rune) bool { + return r == '-' || r == '.' || + '0' <= r && r <= '9' || + 'a' <= r && r <= 'z' +} + +// pathOK reports whether r can appear in an import path element. +// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// This matches what "go get" has historically recognized in import paths. +// TODO(rsc): We would like to allow Unicode letters, but that requires additional +// care in the safe encoding (see note below). +func pathOK(r rune) bool { + if r < utf8.RuneSelf { + return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || + '0' <= r && r <= '9' || + 'A' <= r && r <= 'Z' || + 'a' <= r && r <= 'z' + } + return false +} + +// fileNameOK reports whether r can appear in a file name. +// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. +// If we expand the set of allowed characters here, we have to +// work harder at detecting potential case-folding and normalization collisions. +// See note about "safe encoding" below. +func fileNameOK(r rune) bool { + if r < utf8.RuneSelf { + // Entire set of ASCII punctuation, from which we remove characters: + // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ + // We disallow some shell special characters: " ' * < > ? ` | + // (Note that some of those are disallowed by the Windows file system as well.) + // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). + // We allow spaces (U+0020) in file names. + const allowed = "!#$%&()+,-.=@[]^_{}~ " + if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { + return true + } + for i := 0; i < len(allowed); i++ { + if rune(allowed[i]) == r { + return true + } + } + return false + } + // It may be OK to add more ASCII punctuation here, but only carefully. 
+ // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. + return unicode.IsLetter(r) +} + +// CheckPath checks that a module path is valid. +func CheckPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed module path %q: %v", path, err) + } + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if i == 0 { + return fmt.Errorf("malformed module path %q: leading slash", path) + } + if !strings.Contains(path[:i], ".") { + return fmt.Errorf("malformed module path %q: missing dot in first path element", path) + } + if path[0] == '-' { + return fmt.Errorf("malformed module path %q: leading dash in first path element", path) + } + for _, r := range path[:i] { + if !firstPathOK(r) { + return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r) + } + } + if _, _, ok := SplitPathVersion(path); !ok { + return fmt.Errorf("malformed module path %q: invalid version", path) + } + return nil +} + +// CheckImportPath checks that an import path is valid. +func CheckImportPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed import path %q: %v", path, err) + } + return nil +} + +// checkPath checks that a general path is valid. +// It returns an error describing why but not mentioning path. +// Because these checks apply to both module paths and import paths, +// the caller is expected to add the "malformed ___ path %q: " prefix. +// fileName indicates whether the final element of the path is a file name +// (as opposed to a directory name). +func checkPath(path string, fileName bool) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if strings.Contains(path, "..") { + return fmt.Errorf("double dot") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], fileName); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], fileName); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. +// fileName indicates whether the element is a file name (not a directory name). +func checkElem(elem string, fileName bool) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && !fileName { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' { + return fmt.Errorf("trailing dot in path element") + } + charOK := pathOK + if fileName { + charOK = fileNameOK + } + for _, r := range elem { + if !charOK(r) { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. + // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("disallowed path element %q", elem) + } + } + return nil +} + +// CheckFilePath checks whether a slash-separated file path is valid. 
+func CheckFilePath(path string) error { + if err := checkPath(path, true); err != nil { + return fmt.Errorf("malformed file path %q: %v", path, err) + } + return nil +} + +// badWindowsNames are the reserved file path elements on Windows. +// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} + +// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path +// and version is either empty or "/vN" for N >= 2. +// As a special case, gopkg.in paths are recognized directly; +// they require ".vN" instead of "/vN", and for all N, not just N >= 2. +func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { + if strings.HasPrefix(path, "gopkg.in/") { + return splitGopkgIn(path) + } + + i := len(path) + dot := false + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { + if path[i-1] == '.' { + dot = true + } + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '/' { + return path, "", true + } + prefix, pathMajor = path[:i-2], path[i-2:] + if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { + return path, "", false + } + return prefix, pathMajor, true +} + +// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. +func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return path, "", false + } + i := len(path) + if strings.HasSuffix(path, "-unstable") { + i -= len("-unstable") + } + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { + // All gopkg.in paths must end in vN for some N. + return path, "", false + } + prefix, pathMajor = path[:i-2], path[i-2:] + if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { + return path, "", false + } + return prefix, pathMajor, true +} + +// MatchPathMajor reports whether the semantic version v +// matches the path major version pathMajor. +func MatchPathMajor(v, pathMajor string) bool { + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { + // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. + // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. + return true + } + m := semver.Major(v) + if pathMajor == "" { + return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" + } + return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:] +} + +// CanonicalVersion returns the canonical form of the version string v. +// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". +func CanonicalVersion(v string) string { + cv := semver.Canonical(v) + if semver.Build(v) == "+incompatible" { + cv += "+incompatible" + } + return cv +} + +// Sort sorts the list by Path, breaking ties by comparing Versions. +func Sort(list []Version) { + sort.Slice(list, func(i, j int) bool { + mi := list[i] + mj := list[j] + if mi.Path != mj.Path { + return mi.Path < mj.Path + } + // To help go.sum formatting, allow version/file. 
+ // Compare semver prefix by semver rules, + // file by string order. + vi := mi.Version + vj := mj.Version + var fi, fj string + if k := strings.Index(vi, "/"); k >= 0 { + vi, fi = vi[:k], vi[k:] + } + if k := strings.Index(vj, "/"); k >= 0 { + vj, fj = vj[:k], vj[k:] + } + if vi != vj { + return semver.Compare(vi, vj) < 0 + } + return fi < fj + }) +} + +// Safe encodings +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. +// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the safe encoding be the lowercase +// hexadecimal encoding of the actual path bytes. This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe encoding that +// leaves most paths unaltered. +// +// The safe encoding is this: +// replace every uppercase letter with an exclamation mark +// followed by the letter's lowercase equivalent. +// +// For example, +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. +// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the safe encoding is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to encode a literal !. +// +// Although paths are disallowed from using Unicode (see pathOK above), +// the eventual plan is to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention. Note however that not all runes that +// are different but case-fold equivalent are an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are considered to case-fold to each other. When we do add Unicode +// letters, we must not assume that upper/lower are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would encode as "!!k", or perhaps as "(212A)". +// +// Also, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. 
If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. + +// EncodePath returns the safe encoding of the given module path. +// It fails if the module path is invalid. +func EncodePath(path string) (encoding string, err error) { + if err := CheckPath(path); err != nil { + return "", err + } + + return encodeString(path) +} + +// EncodeVersion returns the safe encoding of the given module version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func EncodeVersion(v string) (encoding string, err error) { + if err := checkElem(v, true); err != nil || strings.Contains(v, "!") { + return "", fmt.Errorf("disallowed version string %q", v) + } + return encodeString(v) +} + +func encodeString(s string) (encoding string, err error) { + haveUpper := false + for _, r := range s { + if r == '!' || r >= utf8.RuneSelf { + // This should be disallowed by CheckPath, but diagnose anyway. + // The correctness of the encoding loop below depends on it. + return "", fmt.Errorf("internal error: inconsistency in EncodePath") + } + if 'A' <= r && r <= 'Z' { + haveUpper = true + } + } + + if !haveUpper { + return s, nil + } + + var buf []byte + for _, r := range s { + if 'A' <= r && r <= 'Z' { + buf = append(buf, '!', byte(r+'a'-'A')) + } else { + buf = append(buf, byte(r)) + } + } + return string(buf), nil +} + +// DecodePath returns the module path of the given safe encoding. +// It fails if the encoding is invalid or encodes an invalid path. +func DecodePath(encoding string) (path string, err error) { + path, ok := decodeString(encoding) + if !ok { + return "", fmt.Errorf("invalid module path encoding %q", encoding) + } + if err := CheckPath(path); err != nil { + return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err) + } + return path, nil +} + +// DecodeVersion returns the version string for the given safe encoding. +// It fails if the encoding is invalid or encodes an invalid version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func DecodeVersion(encoding string) (v string, err error) { + v, ok := decodeString(encoding) + if !ok { + return "", fmt.Errorf("invalid version encoding %q", encoding) + } + if err := checkElem(v, true); err != nil { + return "", fmt.Errorf("disallowed version string %q", v) + } + return v, nil +} + +func decodeString(encoding string) (string, bool) { + var buf []byte + + bang := false + for _, r := range encoding { + if r >= utf8.RuneSelf { + return "", false + } + if bang { + bang = false + if r < 'a' || 'z' < r { + return "", false + } + buf = append(buf, byte(r+'A'-'a')) + continue + } + if r == '!' { + bang = true + continue + } + if 'A' <= r && r <= 'Z' { + return "", false + } + buf = append(buf, byte(r)) + } + if bang { + return "", false + } + return string(buf), true +} diff --git a/vendor/github.com/rogpeppe/go-internal/semver/semver.go b/vendor/github.com/rogpeppe/go-internal/semver/semver.go new file mode 100644 index 000000000..4af7118e5 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/semver/semver.go @@ -0,0 +1,388 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. 
+// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string + err string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other.
+func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + p.err = "missing v prefix" + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad major version" + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + p.err = "bad minor prefix" + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad minor version" + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' { + p.err = "bad patch prefix" + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad patch version" + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + p.err = "bad prerelease" + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + p.err = "bad build" + return + } + } + if v != "" { + p.err = "junk on end" + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) { + return + } + if v[i] == '.' 
{ + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . + var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/github.com/russross/blackfriday/.gitignore b/vendor/github.com/russross/blackfriday/.gitignore deleted file mode 100644 index 75623dccc..000000000 --- a/vendor/github.com/russross/blackfriday/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.out -*.swp -*.8 -*.6 -_obj -_test* -markdown -tags diff --git a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml deleted file mode 100644 index 2f3351d7a..000000000 --- a/vendor/github.com/russross/blackfriday/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -sudo: false -language: go -go: - - "1.9.x" - - "1.10.x" - - tip -matrix: - fast_finish: true - allow_failures: - - go: tip -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... 
diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md deleted file mode 100644 index 3c62e1375..000000000 --- a/vendor/github.com/russross/blackfriday/README.md +++ /dev/null @@ -1,369 +0,0 @@ -Blackfriday -[![Build Status][BuildSVG]][BuildURL] -[![Godoc][GodocV2SVG]][GodocV2URL] -=========== - -Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It -is paranoid about its input (so you can safely feed it user-supplied -data), it is fast, it supports common extensions (tables, smart -punctuation substitutions, etc.), and it is safe for all utf-8 -(unicode) input. - -HTML output is currently supported, along with Smartypants -extensions. - -It started as a translation from C of [Sundown][3]. - - -Installation ------------- - -Blackfriday is compatible with any modern Go release. With Go and git installed: - - go get -u gopkg.in/russross/blackfriday.v2 - -will download, compile, and install the package into your `$GOPATH` directory -hierarchy. - - -Versions --------- - -Currently maintained and recommended version of Blackfriday is `v2`. It's being -developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the -documentation is available at -https://godoc.org/gopkg.in/russross/blackfriday.v2. - -It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, -but we highly recommend using package management tool like [dep][7] or -[Glide][8] and make use of semantic versioning. With package management you -should import `github.com/russross/blackfriday` and specify that you're using -version 2.0.0. - -Version 2 offers a number of improvements over v1: - -* Cleaned up API -* A separate call to [`Parse`][4], which produces an abstract syntax tree for - the document -* Latest bug fixes -* Flexibility to easily add your own rendering extensions - -Potential drawbacks: - -* Our benchmarks show v2 to be slightly slower than v1. Currently in the - ballpark of around 15%. -* API breakage. If you can't afford modifying your code to adhere to the new API - and don't care too much about the new features, v2 is probably not for you. -* Several bug fixes are trailing behind and still need to be forward-ported to - v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for - tracking. - -If you are still interested in the legacy `v1`, you can import it from -`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found -here: https://godoc.org/github.com/russross/blackfriday - -### Known issue with `dep` - -There is a known problem with using Blackfriday v1 _transitively_ and `dep`. -Currently `dep` prioritizes semver versions over anything else, and picks the -latest one, plus it does not apply a `[[constraint]]` specifier to transitively -pulled in packages. So if you're using something that uses Blackfriday v1, but -that something does not use `dep` yet, you will get Blackfriday v2 pulled in and -your first dependency will fail to build. - -There are couple of fixes for it, documented here: -https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version - -Meanwhile, `dep` team is working on a more general solution to the constraints -on transitive dependencies problem: https://github.com/golang/dep/issues/1124. - - -Usage ------ - -### v1 - -For basic usage, it is as simple as getting your input into a byte -slice and calling: - - output := blackfriday.MarkdownBasic(input) - -This renders it with no extensions enabled. 
To get a more useful -feature set, use this instead: - - output := blackfriday.MarkdownCommon(input) - -### v2 - -For the most sensible markdown processing, it is as simple as getting your input -into a byte slice and calling: - -```go -output := blackfriday.Run(input) -``` - -Your input will be parsed and the output rendered with a set of most popular -extensions enabled. If you want the most basic feature set, corresponding with -the bare Markdown specification, use: - -```go -output := blackfriday.Run(input, blackfriday.WithNoExtensions()) -``` - -### Sanitize untrusted content - -Blackfriday itself does nothing to protect against malicious content. If you are -dealing with user-supplied markdown, we recommend running Blackfriday's output -through HTML sanitizer such as [Bluemonday][5]. - -Here's an example of simple usage of Blackfriday together with Bluemonday: - -```go -import ( - "github.com/microcosm-cc/bluemonday" - "gopkg.in/russross/blackfriday.v2" -) - -// ... -unsafe := blackfriday.Run(input) -html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) -``` - -### Custom options, v1 - -If you want to customize the set of options, first get a renderer -(currently only the HTML output engine), then use it to -call the more general `Markdown` function. For examples, see the -implementations of `MarkdownBasic` and `MarkdownCommon` in -`markdown.go`. - -### Custom options, v2 - -If you want to customize the set of options, use `blackfriday.WithExtensions`, -`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. - -### `blackfriday-tool` - -You can also check out `blackfriday-tool` for a more complete example -of how to use it. Download and install it using: - - go get github.com/russross/blackfriday-tool - -This is a simple command-line tool that allows you to process a -markdown file using a standalone program. You can also browse the -source directly on github if you are just looking for some example -code: - -* - -Note that if you have not already done so, installing -`blackfriday-tool` will be sufficient to download and install -blackfriday in addition to the tool itself. The tool binary will be -installed in `$GOPATH/bin`. This is a statically-linked binary that -can be copied to wherever you need it without worrying about -dependencies and library versions. - -### Sanitized anchor names - -Blackfriday includes an algorithm for creating sanitized anchor names -corresponding to a given input text. This algorithm is used to create -anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The -algorithm has a specification, so that other packages can create -compatible anchor names and links to those anchors. - -The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. - -[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to -create compatible links to the anchor names generated by blackfriday. -This algorithm is also implemented in a small standalone package at -[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients -that want a small package and don't need full functionality of blackfriday. - - -Features --------- - -All features of Sundown are supported, including: - -* **Compatibility**. The Markdown v1.0.3 test suite passes with - the `--tidy` option. 
Without `--tidy`, the differences are - mostly in whitespace and entity escaping, where blackfriday is - more consistent and cleaner. - -* **Common extensions**, including table support, fenced code - blocks, autolinks, strikethroughs, non-strict emphasis, etc. - -* **Safety**. Blackfriday is paranoid when parsing, making it safe - to feed untrusted user input without fear of bad things - happening. The test suite stress tests this and there are no - known inputs that make it crash. If you find one, please let me - know and send me the input that does it. - - NOTE: "safety" in this context means *runtime safety only*. In order to - protect yourself against JavaScript injection in untrusted content, see - [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). - -* **Fast processing**. It is fast enough to render on-demand in - most web applications without having to cache the output. - -* **Thread safety**. You can run multiple parsers in different - goroutines without ill effect. There is no dependence on global - shared state. - -* **Minimal dependencies**. Blackfriday only depends on standard - library packages in Go. The source code is pretty - self-contained, so it is easy to add to any project, including - Google App Engine projects. - -* **Standards compliant**. Output successfully validates using the - W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. - - -Extensions ----------- - -In addition to the standard markdown syntax, this package -implements the following extensions: - -* **Intra-word emphasis supression**. The `_` character is - commonly used inside words when discussing code, so having - markdown interpret it as an emphasis command is usually the - wrong thing. Blackfriday lets you treat all emphasis markers as - normal characters when they occur inside a word. - -* **Tables**. Tables can be created by drawing them in the input - using a simple syntax: - - ``` - Name | Age - --------|------ - Bob | 27 - Alice | 23 - ``` - -* **Fenced code blocks**. In addition to the normal 4-space - indentation to mark code blocks, you can explicitly mark them - and supply a language (to make syntax highlighting simple). Just - mark it like this: - - ``` go - func getTrue() bool { - return true - } - ``` - - You can use 3 or more backticks to mark the beginning of the - block, and the same number to mark the end of the block. - - To preserve classes of fenced code blocks while using the bluemonday - HTML sanitizer, use the following policy: - - ``` go - p := bluemonday.UGCPolicy() - p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") - html := p.SanitizeBytes(unsafe) - ``` - -* **Definition lists**. A simple definition list is made of a single-line - term followed by a colon and the definition for that term. - - Cat - : Fluffy animal everyone likes - - Internet - : Vector of transmission for pictures of cats - - Terms must be separated from the previous definition by a blank line. - -* **Footnotes**. A marker in the text that will become a superscript number; - a footnote definition that will be placed in a list of footnotes at the - end of the document. A footnote looks like this: - - This is a footnote.[^1] - - [^1]: the footnote text. - -* **Autolinking**. Blackfriday can find URLs that have not been - explicitly marked as links and turn them into links. - -* **Strikethrough**. Use two tildes (`~~`) to mark text that - should be crossed out. - -* **Hard line breaks**. 
With this extension enabled (it is off by - default in the `MarkdownBasic` and `MarkdownCommon` convenience - functions), newlines in the input translate into line breaks in - the output. - -* **Smart quotes**. Smartypants-style punctuation substitution is - supported, turning normal double- and single-quote marks into - curly quotes, etc. - -* **LaTeX-style dash parsing** is an additional option, where `--` - is translated into `–`, and `---` is translated into - `—`. This differs from most smartypants processors, which - turn a single hyphen into an ndash and a double hyphen into an - mdash. - -* **Smart fractions**, where anything that looks like a fraction - is translated into suitable HTML (instead of just a few special - cases like most smartypant processors). For example, `4/5` - becomes `45`, which renders as - 45. - - -Other renderers ---------------- - -Blackfriday is structured to allow alternative rendering engines. Here -are a few of note: - -* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): - provides a GitHub Flavored Markdown renderer with fenced code block - highlighting, clickable heading anchor links. - - It's not customizable, and its goal is to produce HTML output - equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), - except the rendering is performed locally. - -* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, - but for markdown. - -* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): - renders output as LaTeX. - -* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience - integration with the [Chroma](https://github.com/alecthomas/chroma) code - highlighting library. bfchroma is only compatible with v2 of Blackfriday and - provides a drop-in renderer ready to use with Blackfriday, as well as - options and means for further customization. - - -TODO ----- - -* More unit testing -* Improve Unicode support. It does not understand all Unicode - rules (about what constitutes a letter, a punctuation symbol, - etc.), so it may fail to detect word boundaries correctly in - some instances. It is safe on all UTF-8 input. 
- - -License -------- - -[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) - - - [1]: https://daringfireball.net/projects/markdown/ "Markdown" - [2]: https://golang.org/ "Go Language" - [3]: https://github.com/vmg/sundown "Sundown" - [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" - [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" - [6]: https://labix.org/gopkg.in "gopkg.in" - [7]: https://github.com/golang/dep/ "dep" - [8]: https://github.com/Masterminds/glide "Glide" - - [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master - [BuildURL]: https://travis-ci.org/russross/blackfriday - [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg - [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2 diff --git a/vendor/github.com/russross/blackfriday/go.mod b/vendor/github.com/russross/blackfriday/go.mod deleted file mode 100644 index b05561a06..000000000 --- a/vendor/github.com/russross/blackfriday/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/russross/blackfriday diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt new file mode 100644 index 000000000..298f0e266 --- /dev/null +++ b/vendor/github.com/spf13/afero/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go new file mode 100644 index 000000000..f5b5e127c --- /dev/null +++ b/vendor/github.com/spf13/afero/afero.go @@ -0,0 +1,108 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package afero provides types and methods for interacting with the filesystem, +// as an abstraction layer. + +// Afero also provides a few implementations that are mostly interoperable. One that +// uses the operating system filesystem, one that uses memory to store files +// (cross platform) and an interface that should be implemented if you want to +// provide your own filesystem. + +package afero + +import ( + "errors" + "io" + "os" + "time" +) + +type Afero struct { + Fs +} + +// File represents a file in the filesystem. +type File interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string + Readdir(count int) ([]os.FileInfo, error) + Readdirnames(n int) ([]string, error) + Stat() (os.FileInfo, error) + Sync() error + Truncate(size int64) error + WriteString(s string) (ret int, err error) +} + +// Fs is the filesystem interface. +// +// Any simulated or real filesystem should implement this interface. +type Fs interface { + // Create creates a file in the filesystem, returning the file and an + // error, if any happens. + Create(name string) (File, error) + + // Mkdir creates a directory in the filesystem, return an error if any + // happens. 
+ Mkdir(name string, perm os.FileMode) error + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(path string, perm os.FileMode) error + + // Open opens a file, returning it or an error, if any happens. + Open(name string) (File, error) + + // OpenFile opens a file using the given flags and the given mode. + OpenFile(name string, flag int, perm os.FileMode) (File, error) + + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(name string) error + + // RemoveAll removes a directory path and any children it contains. It + // does not fail if the path does not exist (return nil). + RemoveAll(path string) error + + // Rename renames a file. + Rename(oldname, newname string) error + + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(name string) (os.FileInfo, error) + + // The name of this FileSystem + Name() string + + //Chmod changes the mode of the named file to mode. + Chmod(name string, mode os.FileMode) error + + //Chtimes changes the access and modification times of the named file + Chtimes(name string, atime time.Time, mtime time.Time) error +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go new file mode 100644 index 000000000..616ff8ff7 --- /dev/null +++ b/vendor/github.com/spf13/afero/basepath.go @@ -0,0 +1,180 @@ +package afero + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +var _ Lstater = (*BasePathFs)(nil) + +// The BasePathFs restricts all operations to a given path within an Fs. +// The given file name to the operations on this Fs will be prepended with +// the base path before calling the base Fs. +// Any file name (after filepath.Clean()) outside this base path will be +// treated as non existing file. +// +// Note that it does not clean the error messages on return, so you may +// reveal the real path on errors. +type BasePathFs struct { + source Fs + path string +} + +type BasePathFile struct { + File + path string +} + +func (f *BasePathFile) Name() string { + sourcename := f.File.Name() + return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) +} + +func NewBasePathFs(source Fs, path string) Fs { + return &BasePathFs{source: source, path: path} +} + +// on a file outside the base path it returns the given file name and an error, +// else the given file with the base path prepended +func (b *BasePathFs) RealPath(name string) (path string, err error) { + if err := validateBasePathName(name); err != nil { + return name, err + } + + bpath := filepath.Clean(b.path) + path = filepath.Clean(filepath.Join(bpath, name)) + if !strings.HasPrefix(path, bpath) { + return name, os.ErrNotExist + } + + return path, nil +} + +func validateBasePathName(name string) error { + if runtime.GOOS != "windows" { + // Not much to do here; + // the virtual file paths all look absolute on *nix. + return nil + } + + // On Windows a common mistake would be to provide an absolute OS path + // We could strip out the base part, but that would not be very portable. 
+ if filepath.IsAbs(name) { + return os.ErrNotExist + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + sourcef, err := b.source.OpenFile(name, flag, mode) + if err != nil { + return nil, err + } + return &BasePathFile{sourcef, b.path}, nil +} + +func (b *BasePathFs) Open(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + sourcef, err := b.source.Open(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + sourcef, err := b.source.Create(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + name, err := b.RealPath(name) + if err != nil { + return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} + } + if lstater, ok := b.source.(Lstater); ok { + return lstater.LstatIfPossible(name) + } + fi, err := b.source.Stat(name) + return fi, false, err +} + +// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go 
new file mode 100644 index 000000000..29a26c67d --- /dev/null +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -0,0 +1,290 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +// If the cache duration is 0, cache time will be unlimited, i.e. once +// a file is in the layer, the base will never be read again for this file. +// +// For cache times greater than 0, the modification time of a file is +// checked. Note that a lot of file system implementations only allow a +// resolution of a second for timestamps... or as the godoc for os.Chtimes() +// states: "The underlying filesystem may truncate or round the values to a +// less precise time unit." +// +// This caching union will forward all write calls also to the base file +// system first. To prevent writing to the base Fs, wrap it in a read-only +// filter - Note: this will also make the overlay read-only, for writing files +// in the overlay, use the overlay Fs directly, not via the union Fs. +type CacheOnReadFs struct { + base Fs + layer Fs + cacheTime time.Duration +} + +func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { + return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} +} + +type cacheState int + +const ( + // not present in the overlay, unknown if it exists in the base: + cacheMiss cacheState = iota + // present in the overlay and in base, base file is newer: + cacheStale + // present in the overlay - with cache time == 0 it may exist in the base, + // with cacheTime > 0 it exists in the base and is same age or newer in the + // overlay + cacheHit + // happens if someone writes directly to the overlay without + // going through this union + cacheLocal +) + +func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { + var lfi, bfi os.FileInfo + lfi, err = u.layer.Stat(name) + if err == nil { + if u.cacheTime == 0 { + return cacheHit, lfi, nil + } + if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { + bfi, err = u.base.Stat(name) + if err != nil { + return cacheLocal, lfi, nil + } + if bfi.ModTime().After(lfi.ModTime()) { + return cacheStale, bfi, nil + } + } + return cacheHit, lfi, nil + } + + if err == syscall.ENOENT || os.IsNotExist(err) { + return cacheMiss, nil, nil + } + + return cacheMiss, nil, err +} + +func (u *CacheOnReadFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chtimes(name, atime, mtime) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chtimes(name, atime, mtime) + } + if err != nil { + return err + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chmod(name, mode) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chmod(name, mode) + } + if err != nil { + return err + } + return u.layer.Chmod(name, mode) +} + +func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheMiss: + return u.base.Stat(name) + default: // cacheStale has base, 
cacheHit and cacheLocal the layer os.FileInfo + return fi, nil + } +} + +func (u *CacheOnReadFs) Rename(oldname, newname string) error { + st, _, err := u.cacheStatus(oldname) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Rename(oldname, newname) + case cacheStale, cacheMiss: + if err := u.copyToLayer(oldname); err != nil { + return err + } + err = u.base.Rename(oldname, newname) + } + if err != nil { + return err + } + return u.layer.Rename(oldname, newname) +} + +func (u *CacheOnReadFs) Remove(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.Remove(name) + } + if err != nil { + return err + } + return u.layer.Remove(name) +} + +func (u *CacheOnReadFs) RemoveAll(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.RemoveAll(name) + } + if err != nil { + return err + } + return u.layer.RemoveAll(name) +} + +func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + st, _, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheLocal, cacheHit: + default: + if err := u.copyToLayer(name); err != nil { + return nil, err + } + } + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + bfi, err := u.base.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + lfi, err := u.layer.OpenFile(name, flag, perm) + if err != nil { + bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? + return nil, err + } + return &UnionFile{Base: bfi, Layer: lfi}, nil + } + return u.layer.OpenFile(name, flag, perm) +} + +func (u *CacheOnReadFs) Open(name string) (File, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + + switch st { + case cacheLocal: + return u.layer.Open(name) + + case cacheMiss: + bfi, err := u.base.Stat(name) + if err != nil { + return nil, err + } + if bfi.IsDir() { + return u.base.Open(name) + } + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + + case cacheStale: + if !fi.IsDir() { + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + } + case cacheHit: + if !fi.IsDir() { + return u.layer.Open(name) + } + } + // the dirs from cacheHit, cacheStale fall down here: + bfile, _ := u.base.Open(name) + lfile, err := u.layer.Open(name) + if err != nil && bfile == nil { + return nil, err + } + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { + err := u.base.Mkdir(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache +} + +func (u *CacheOnReadFs) Name() string { + return "CacheOnReadFs" +} + +func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { + err := u.base.MkdirAll(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CacheOnReadFs) Create(name string) (File, error) { + bfh, err := u.base.Create(name) + if err != nil { + return nil, err + } + lfh, err := u.layer.Create(name) + if err != nil { + // oops, see comment about OS_TRUNC above, should we remove? 
then we have to + // remember if the file did not exist before + bfh.Close() + return nil, err + } + return &UnionFile{Base: bfh, Layer: lfh}, nil +} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go new file mode 100644 index 000000000..5728243d9 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -0,0 +1,22 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin openbsd freebsd netbsd dragonfly + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go new file mode 100644 index 000000000..968fc2783 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -0,0 +1,25 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +build !darwin +// +build !openbsd +// +build !freebsd +// +build !dragonfly +// +build !netbsd + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go new file mode 100644 index 000000000..e8108a851 --- /dev/null +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -0,0 +1,293 @@ +package afero + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" +) + +var _ Lstater = (*CopyOnWriteFs)(nil) + +// The CopyOnWriteFs is a union filesystem: a read only base file system with +// a possibly writeable layer on top. Changes to the file system will only +// be made in the overlay: Changing an existing file in the base layer which +// is not present in the overlay will copy the file to the overlay ("changing" +// includes also calls to e.g. Chtimes() and Chmod()). +// +// Reading directories is currently only supported via Open(), not OpenFile(). 
+type CopyOnWriteFs struct { + base Fs + layer Fs +} + +func NewCopyOnWriteFs(base Fs, layer Fs) Fs { + return &CopyOnWriteFs{base: base, layer: layer} +} + +// Returns true if the file is not in the overlay +func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { + if _, err := u.layer.Stat(name); err == nil { + return false, nil + } + _, err := u.base.Stat(name) + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + return false, nil + } + } + if err == syscall.ENOENT { + return false, nil + } + } + return true, err +} + +func (u *CopyOnWriteFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chmod(name, mode) +} + +func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { + fi, err := u.layer.Stat(name) + if err != nil { + isNotExist := u.isNotExist(err) + if isNotExist { + return u.base.Stat(name) + } + return nil, err + } + return fi, nil +} + +func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + llayer, ok1 := u.layer.(Lstater) + lbase, ok2 := u.base.(Lstater) + + if ok1 { + fi, b, err := llayer.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + + if !u.isNotExist(err) { + return nil, b, err + } + } + + if ok2 { + fi, b, err := lbase.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + if !u.isNotExist(err) { + return nil, b, err + } + } + + fi, err := u.Stat(name) + + return fi, false, err +} + +func (u *CopyOnWriteFs) isNotExist(err error) bool { + if e, ok := err.(*os.PathError); ok { + err = e.Err + } + if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { + return true + } + return false +} + +// Renaming files present only in the base layer is not permitted +func (u *CopyOnWriteFs) Rename(oldname, newname string) error { + b, err := u.isBaseFile(oldname) + if err != nil { + return err + } + if b { + return syscall.EPERM + } + return u.layer.Rename(oldname, newname) +} + +// Removing files present only in the base layer is not permitted. If +// a file is present in the base layer and the overlay, only the overlay +// will be removed. 
+func (u *CopyOnWriteFs) Remove(name string) error { + err := u.layer.Remove(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) RemoveAll(name string) error { + err := u.layer.RemoveAll(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + if b { + if err = u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + dir := filepath.Dir(name) + isaDir, err := IsDir(u.base, dir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if isaDir { + if err = u.layer.MkdirAll(dir, 0777); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + isaDir, err = IsDir(u.layer, dir) + if err != nil { + return nil, err + } + if isaDir { + return u.layer.OpenFile(name, flag, perm) + } + + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + } + if b { + return u.base.OpenFile(name, flag, perm) + } + return u.layer.OpenFile(name, flag, perm) +} + +// This function handles the 9 different possibilities caused +// by the union which are the intersection of the following... +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory +func (u *CopyOnWriteFs) Open(name string) (File, error) { + // Since the overlay overrides the base we check that first + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + // If overlay doesn't exist, return the base (base state irrelevant) + if b { + return u.base.Open(name) + } + + // If overlay is a file, return it (base state irrelevant) + dir, err := IsDir(u.layer, name) + if err != nil { + return nil, err + } + if !dir { + return u.layer.Open(name) + } + + // Overlay is a directory, base state now matters. + // Base state has 3 states to check but 2 outcomes: + // A. It's a file or non-readable in the base (return just the overlay) + // B. It's an accessible directory in the base (return a UnionFile) + + // If base is file or nonreadable, return overlay + dir, err = IsDir(u.base, name) + if !dir || err != nil { + return u.layer.Open(name) + } + + // Both base & layer are directories + // Return union file (if opens are without error) + bfile, bErr := u.base.Open(name) + lfile, lErr := u.layer.Open(name) + + // If either have errors at this point something is very wrong. 
Return nil and the errors + if bErr != nil || lErr != nil { + return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) + } + + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return ErrFileExists + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Name() string { + return "CopyOnWriteFs" +} + +func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + // This is in line with how os.MkdirAll behaves. + return nil + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Create(name string) (File, error) { + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) +} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go new file mode 100644 index 000000000..c42193688 --- /dev/null +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +type httpDir struct { + basePath string + fs HttpFs +} + +func (d httpDir) Open(name string) (http.File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d.basePath) + if dir == "" { + dir = "." 
+ } + + f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +type HttpFs struct { + source Fs +} + +func NewHttpFs(source Fs) *HttpFs { + return &HttpFs{source: source} +} + +func (h HttpFs) Dir(s string) *httpDir { + return &httpDir{basePath: s, fs: h} +} + +func (h HttpFs) Name() string { return "h HttpFs" } + +func (h HttpFs) Create(name string) (File, error) { + return h.source.Create(name) +} + +func (h HttpFs) Chmod(name string, mode os.FileMode) error { + return h.source.Chmod(name, mode) +} + +func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return h.source.Chtimes(name, atime, mtime) +} + +func (h HttpFs) Mkdir(name string, perm os.FileMode) error { + return h.source.Mkdir(name, perm) +} + +func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { + return h.source.MkdirAll(path, perm) +} + +func (h HttpFs) Open(name string) (http.File, error) { + f, err := h.source.Open(name) + if err == nil { + if httpfile, ok := f.(http.File); ok { + return httpfile, nil + } + } + return nil, err +} + +func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return h.source.OpenFile(name, flag, perm) +} + +func (h HttpFs) Remove(name string) error { + return h.source.Remove(name) +} + +func (h HttpFs) RemoveAll(path string) error { + return h.source.RemoveAll(path) +} + +func (h HttpFs) Rename(oldname, newname string) error { + return h.source.Rename(oldname, newname) +} + +func (h HttpFs) Stat(name string) (os.FileInfo, error) { + return h.source.Stat(name) +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go new file mode 100644 index 000000000..5c3a3d8ff --- /dev/null +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -0,0 +1,230 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" +) + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// ReadDir reads the directory named by dirname and returns +// a list of sorted directory entries. +func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { + return ReadDir(a.Fs, dirname) +} + +func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byName(list)) + return list, nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. 
Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. +func (a Afero) ReadFile(filename string) ([]byte, error) { + return ReadFile(a.Fs, filename) +} + +func ReadFile(fs Fs, filename string) ([]byte, error) { + f, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + // It's a good but not certain bet that FileInfo will tell us exactly how much to + // read, so let's try it but be prepared for the answer to be wrong. + var n int64 + + if fi, err := f.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = size + } + } + // As initial capacity for readAll, use n + a little extra in case Size is zero, + // and to avoid another allocation after Read has filled the buffer. The readAll + // call will read into its allocated internal buffer cheaply. If the size was + // wrong, we'll either waste some space off the end or reallocate as needed, but + // in the overwhelmingly common case we'll get it just right. + return readAll(f, n+bytes.MinRead) +} + +// readAll reads from r until an error or EOF and returns the data it read +// from the internal buffer allocated with a specified capacity. +func readAll(r io.Reader, capacity int64) (b []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, capacity)) + // If the buffer overflows, we will get bytes.ErrTooLarge. + // Return that as an error. Any other panic remains. + defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(r) + return buf.Bytes(), err +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +func ReadAll(r io.Reader) ([]byte, error) { + return readAll(r, bytes.MinRead) +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { + return WriteFile(a.Fs, filename, data, perm) +} + +func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *File. 
+// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func (a Afero) TempFile(dir, prefix string) (f File, err error) { + return TempFile(a.Fs, dir, prefix) +} + +func TempFile(fs Fs, dir, prefix string) (f File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). +// Multiple programs calling TempDir simultaneously +// will not choose the same directory. It is the caller's responsibility +// to remove the directory when no longer needed. +func (a Afero) TempDir(dir, prefix string) (name string, err error) { + return TempDir(a.Fs, dir, prefix) +} +func TempDir(fs Fs, dir, prefix string) (name string, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + try := filepath.Join(dir, prefix+nextSuffix()) + err = fs.Mkdir(try, 0700) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + if err == nil { + name = try + } + break + } + return +} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go new file mode 100644 index 000000000..89c1bfc0a --- /dev/null +++ b/vendor/github.com/spf13/afero/lstater.go @@ -0,0 +1,27 @@ +// Copyright © 2018 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" +) + +// Lstater is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. +// Else it will call Stat. +// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. +type Lstater interface { + LstatIfPossible(name string) (os.FileInfo, bool, error) +} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go new file mode 100644 index 000000000..c18a87fb7 --- /dev/null +++ b/vendor/github.com/spf13/afero/match.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2009 The Go Authors. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "path/filepath" + "sort" + "strings" +) + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of patterns is the same +// as in Match. The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// This was adapted from (http://golang.org/pkg/path/filepath) and uses several +// built-ins from that package. +func Glob(fs Fs, pattern string) (matches []string, err error) { + if !hasMeta(pattern) { + // Lstat not supported by a ll filesystems. + if _, err = lstatIfPossible(fs, pattern); err != nil { + return nil, nil + } + return []string{pattern}, nil + } + + dir, file := filepath.Split(pattern) + switch dir { + case "": + dir = "." + case string(filepath.Separator): + // nothing + default: + dir = dir[0 : len(dir)-1] // chop off trailing separator + } + + if !hasMeta(dir) { + return glob(fs, dir, file, nil) + } + + var m []string + m, err = Glob(fs, dir) + if err != nil { + return + } + for _, d := range m { + matches, err = glob(fs, d, file, matches) + if err != nil { + return + } + } + return +} + +// glob searches for files matching pattern in the directory dir +// and appends them to matches. If the directory cannot be +// opened, it returns the existing matches. New matches are +// added in lexicographical order. +func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { + m = matches + fi, err := fs.Stat(dir) + if err != nil { + return + } + if !fi.IsDir() { + return + } + d, err := fs.Open(dir) + if err != nil { + return + } + defer d.Close() + + names, _ := d.Readdirnames(-1) + sort.Strings(names) + + for _, n := range names { + matched, err := filepath.Match(pattern, n) + if err != nil { + return m, err + } + if matched { + m = append(m, filepath.Join(dir, n)) + } + } + return +} + +// hasMeta reports whether path contains any of the magic characters +// recognized by Match. +func hasMeta(path string) bool { + // TODO(niemeyer): Should other magic characters be added here? + return strings.IndexAny(path, "*?[") >= 0 +} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go new file mode 100644 index 000000000..e104013f4 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dir.go @@ -0,0 +1,37 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +type Dir interface { + Len() int + Names() []string + Files() []*FileData + Add(*FileData) + Remove(*FileData) +} + +func RemoveFromMemDir(dir *FileData, f *FileData) { + dir.memDir.Remove(f) +} + +func AddToMemDir(dir *FileData, f *FileData) { + dir.memDir.Add(f) +} + +func InitializeDir(d *FileData) { + if d.memDir == nil { + d.dir = true + d.memDir = &DirMap{} + } +} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go new file mode 100644 index 000000000..03a57ee5b --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dirmap.go @@ -0,0 +1,43 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +import "sort" + +type DirMap map[string]*FileData + +func (m DirMap) Len() int { return len(m) } +func (m DirMap) Add(f *FileData) { m[f.name] = f } +func (m DirMap) Remove(f *FileData) { delete(m, f.name) } +func (m DirMap) Files() (files []*FileData) { + for _, f := range m { + files = append(files, f) + } + sort.Sort(filesSorter(files)) + return files +} + +// implement sort.Interface for []*FileData +type filesSorter []*FileData + +func (s filesSorter) Len() int { return len(s) } +func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } + +func (m DirMap) Names() (names []string) { + for x := range m { + names = append(names, x) + } + return names +} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go new file mode 100644 index 000000000..7af2fb56f --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -0,0 +1,317 @@ +// Copyright © 2015 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mem + +import ( + "bytes" + "errors" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" +) + +import "time" + +const FilePathSeparator = string(filepath.Separator) + +type File struct { + // atomic requires 64-bit alignment for struct field access + at int64 + readDirCount int64 + closed bool + readOnly bool + fileData *FileData +} + +func NewFileHandle(data *FileData) *File { + return &File{fileData: data} +} + +func NewReadOnlyFileHandle(data *FileData) *File { + return &File{fileData: data, readOnly: true} +} + +func (f File) Data() *FileData { + return f.fileData +} + +type FileData struct { + sync.Mutex + name string + data []byte + memDir Dir + dir bool + mode os.FileMode + modtime time.Time +} + +func (d *FileData) Name() string { + d.Lock() + defer d.Unlock() + return d.name +} + +func CreateFile(name string) *FileData { + return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} +} + +func CreateDir(name string) *FileData { + return &FileData{name: name, memDir: &DirMap{}, dir: true} +} + +func ChangeFileName(f *FileData, newname string) { + f.Lock() + f.name = newname + f.Unlock() +} + +func SetMode(f *FileData, mode os.FileMode) { + f.Lock() + f.mode = mode + f.Unlock() +} + +func SetModTime(f *FileData, mtime time.Time) { + f.Lock() + setModTime(f, mtime) + f.Unlock() +} + +func setModTime(f *FileData, mtime time.Time) { + f.modtime = mtime +} + +func GetFileInfo(f *FileData) *FileInfo { + return &FileInfo{f} +} + +func (f *File) Open() error { + atomic.StoreInt64(&f.at, 0) + atomic.StoreInt64(&f.readDirCount, 0) + f.fileData.Lock() + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + if !f.readOnly { + setModTime(f.fileData, time.Now()) + } + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + return f.fileData.Name() +} + +func (f *File) Stat() (os.FileInfo, error) { + return &FileInfo{f.fileData}, nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + if !f.fileData.dir { + return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + } + var outLength int64 + + f.fileData.Lock() + files := f.fileData.memDir.Files()[f.readDirCount:] + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + f.fileData.Unlock() + + res = make([]os.FileInfo, outLength) + for i := range res { + res[i] = &FileInfo{files[i]} + } + + return res, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + _, names[i] = filepath.Split(f.Name()) + } + return names, err +} + +func (f *File) Read(b []byte) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + if f.closed == true { + return 0, ErrFileClosed + } + if len(b) > 0 && int(f.at) == len(f.fileData.data) { + return 0, io.EOF + } + if int(f.at) > len(f.fileData.data) { + return 0, io.ErrUnexpectedEOF + } + if len(f.fileData.data)-int(f.at) >= len(b) { + n = len(b) + } else { + n = len(f.fileData.data) - int(f.at) + } + copy(b, f.fileData.data[f.at:f.at+int64(n)]) + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return 
f.Read(b) +} + +func (f *File) Truncate(size int64) error { + if f.closed == true { + return ErrFileClosed + } + if f.readOnly { + return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + if size < 0 { + return ErrOutOfRange + } + if size > int64(len(f.fileData.data)) { + diff := size - int64(len(f.fileData.data)) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + } else { + f.fileData.data = f.fileData.data[0:size] + } + setModTime(f.fileData, time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed == true { + return 0, ErrFileClosed + } + switch whence { + case 0: + atomic.StoreInt64(&f.at, offset) + case 1: + atomic.AddInt64(&f.at, int64(offset)) + case 2: + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) + } + return f.at, nil +} + +func (f *File) Write(b []byte) (n int, err error) { + if f.readOnly { + return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + n = len(b) + cur := atomic.LoadInt64(&f.at) + f.fileData.Lock() + defer f.fileData.Unlock() + diff := cur - int64(len(f.fileData.data)) + var tail []byte + if n+int(cur) < len(f.fileData.data) { + tail = f.fileData.data[n+int(cur):] + } + if diff > 0 { + f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) + f.fileData.data = append(f.fileData.data, tail...) + } else { + f.fileData.data = append(f.fileData.data[:cur], b...) + f.fileData.data = append(f.fileData.data, tail...) + } + setModTime(f.fileData, time.Now()) + + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) + return +} + +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Write(b) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +func (f *File) Info() *FileInfo { + return &FileInfo{f.fileData} +} + +type FileInfo struct { + *FileData +} + +// Implements os.FileInfo +func (s *FileInfo) Name() string { + s.Lock() + _, name := filepath.Split(s.name) + s.Unlock() + return name +} +func (s *FileInfo) Mode() os.FileMode { + s.Lock() + defer s.Unlock() + return s.mode +} +func (s *FileInfo) ModTime() time.Time { + s.Lock() + defer s.Unlock() + return s.modtime +} +func (s *FileInfo) IsDir() bool { + s.Lock() + defer s.Unlock() + return s.dir +} +func (s *FileInfo) Sys() interface{} { return nil } +func (s *FileInfo) Size() int64 { + if s.IsDir() { + return int64(42) + } + s.Lock() + defer s.Unlock() + return int64(len(s.data)) +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go new file mode 100644 index 000000000..09498e70f --- /dev/null +++ b/vendor/github.com/spf13/afero/memmap.go @@ -0,0 +1,365 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/spf13/afero/mem" +) + +type MemMapFs struct { + mu sync.RWMutex + data map[string]*mem.FileData + init sync.Once +} + +func NewMemMapFs() Fs { + return &MemMapFs{} +} + +func (m *MemMapFs) getData() map[string]*mem.FileData { + m.init.Do(func() { + m.data = make(map[string]*mem.FileData) + // Root should always exist, right? + // TODO: what about windows? + m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) + }) + return m.data +} + +func (*MemMapFs) Name() string { return "MemMapFS" } + +func (m *MemMapFs) Create(name string) (File, error) { + name = normalizePath(name) + m.mu.Lock() + file := mem.CreateFile(name) + m.getData()[name] = file + m.registerWithParent(file) + m.mu.Unlock() + return mem.NewFileHandle(file), nil +} + +func (m *MemMapFs) unRegisterWithParent(fileName string) error { + f, err := m.lockfreeOpen(fileName) + if err != nil { + return err + } + parent := m.findParent(f) + if parent == nil { + log.Panic("parent of ", f.Name(), " is nil") + } + + parent.Lock() + mem.RemoveFromMemDir(parent, f) + parent.Unlock() + return nil +} + +func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { + pdir, _ := filepath.Split(f.Name()) + pdir = filepath.Clean(pdir) + pfile, err := m.lockfreeOpen(pdir) + if err != nil { + return nil + } + return pfile +} + +func (m *MemMapFs) registerWithParent(f *mem.FileData) { + if f == nil { + return + } + parent := m.findParent(f) + if parent == nil { + pdir := filepath.Dir(filepath.Clean(f.Name())) + err := m.lockfreeMkdir(pdir, 0777) + if err != nil { + //log.Println("Mkdir error:", err) + return + } + parent, err = m.lockfreeOpen(pdir) + if err != nil { + //log.Println("Open after Mkdir error:", err) + return + } + } + + parent.Lock() + mem.InitializeDir(parent) + mem.AddToMemDir(parent, f) + parent.Unlock() +} + +func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + x, ok := m.getData()[name] + if ok { + // Only return ErrFileExists if it's a file, not a directory. 
+ i := mem.FileInfo{FileData: x} + if !i.IsDir() { + return ErrFileExists + } + } else { + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + } + return nil +} + +func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + _, ok := m.getData()[name] + m.mu.RUnlock() + if ok { + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } + + m.mu.Lock() + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + m.mu.Unlock() + + m.Chmod(name, perm|os.ModeDir) + + return nil +} + +func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { + err := m.Mkdir(path, perm) + if err != nil { + if err.(*os.PathError).Err == ErrFileExists { + return nil + } + return err + } + return nil +} + +// Handle some relative paths +func normalizePath(path string) string { + path = filepath.Clean(path) + + switch path { + case ".": + return FilePathSeparator + case "..": + return FilePathSeparator + default: + return path + } +} + +func (m *MemMapFs) Open(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewReadOnlyFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) openWrite(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) open(name string) (*mem.FileData, error) { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} + } + return f, nil +} + +func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { + name = normalizePath(name) + f, ok := m.getData()[name] + if ok { + return f, nil + } else { + return nil, ErrFileNotFound + } +} + +func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + chmod := false + file, err := m.openWrite(name) + if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { + file, err = m.Create(name) + chmod = true + } + if err != nil { + return nil, err + } + if flag == os.O_RDONLY { + file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) + } + if flag&os.O_APPEND > 0 { + _, err = file.Seek(0, os.SEEK_END) + if err != nil { + file.Close() + return nil, err + } + } + if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { + err = file.Truncate(0) + if err != nil { + file.Close() + return nil, err + } + } + if chmod { + m.Chmod(name, perm) + } + return file, nil +} + +func (m *MemMapFs) Remove(name string) error { + name = normalizePath(name) + + m.mu.Lock() + defer m.mu.Unlock() + + if _, ok := m.getData()[name]; ok { + err := m.unRegisterWithParent(name) + if err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + delete(m.getData(), name) + } else { + return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} + } + return nil +} + +func (m *MemMapFs) RemoveAll(path string) error { + path = normalizePath(path) + m.mu.Lock() + m.unRegisterWithParent(path) + m.mu.Unlock() + + m.mu.RLock() + defer m.mu.RUnlock() + + for p, _ := range m.getData() { + if strings.HasPrefix(p, path) { + m.mu.RUnlock() + m.mu.Lock() + delete(m.getData(), p) + m.mu.Unlock() + m.mu.RLock() + } + } + return nil +} + +func (m *MemMapFs) Rename(oldname, newname string) error { + oldname = normalizePath(oldname) + newname = normalizePath(newname) + + if oldname == newname { + return nil + } + + m.mu.RLock() + defer m.mu.RUnlock() + if _, ok := 
m.getData()[oldname]; ok { + m.mu.RUnlock() + m.mu.Lock() + m.unRegisterWithParent(oldname) + fileData := m.getData()[oldname] + delete(m.getData(), oldname) + mem.ChangeFileName(fileData, newname) + m.getData()[newname] = fileData + m.registerWithParent(fileData) + m.mu.Unlock() + m.mu.RLock() + } else { + return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} + } + return nil +} + +func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { + f, err := m.Open(name) + if err != nil { + return nil, err + } + fi := mem.GetFileInfo(f.(*mem.File).Data()) + return fi, nil +} + +func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetMode(f, mode) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetModTime(f, mtime) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) List() { + for _, x := range m.data { + y := mem.FileInfo{FileData: x} + fmt.Println(x.Name(), y.Size()) + } +} + +// func debugMemMapList(fs Fs) { +// if x, ok := fs.(*MemMapFs); ok { +// x.List() +// } +// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go new file mode 100644 index 000000000..13cc1b84c --- /dev/null +++ b/vendor/github.com/spf13/afero/os.go @@ -0,0 +1,101 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" +) + +var _ Lstater = (*OsFs)(nil) + +// OsFs is a Fs implementation that uses functions provided by the os package. +// +// For details in any method, check the documentation of the os package +// (http://golang.org/pkg/os/). 
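// Illustrative sketch (not part of the vendored file): the comment above
// introduces OsFs, the Fs implementation backed by the standard os package.
// Because callers program against the afero.Fs interface, OsFs can be swapped
// for MemMapFs in tests; an assumed minimal example:

package main // hypothetical example program

import (
	"fmt"

	"github.com/spf13/afero"
)

// saveReport works on any afero.Fs implementation.
func saveReport(fs afero.Fs, path string, body []byte) error {
	return afero.WriteFile(fs, path, body, 0644)
}

func main() {
	// Production: the real filesystem.
	_ = saveReport(afero.NewOsFs(), "/tmp/report.txt", []byte("ok"))

	// Tests: an in-memory filesystem, no disk I/O.
	memFs := afero.NewMemMapFs()
	_ = saveReport(memFs, "/tmp/report.txt", []byte("ok"))
	data, _ := afero.ReadFile(memFs, "/tmp/report.txt")
	fmt.Println(string(data)) // "ok"
}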
+type OsFs struct{} + +func NewOsFs() Fs { + return &OsFs{} +} + +func (OsFs) Name() string { return "OsFs" } + +func (OsFs) Create(name string) (File, error) { + f, e := os.Create(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (OsFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OsFs) Open(name string) (File, error) { + f, e := os.Open(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, e := os.OpenFile(name, flag, perm) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Remove(name string) error { + return os.Remove(name) +} + +func (OsFs) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (OsFs) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (OsFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (OsFs) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} + +func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + fi, err := os.Lstat(name) + return fi, true, err +} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go new file mode 100644 index 000000000..18f60a0f6 --- /dev/null +++ b/vendor/github.com/spf13/afero/path.go @@ -0,0 +1,106 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "sort" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. 
+// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs Fs, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := lstatIfPossible(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// if the filesystem supports it, use Lstat, else use fs.Stat +func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { + if lfs, ok := fs.(Lstater); ok { + fi, _, err := lfs.LstatIfPossible(path) + return fi, err + } + return fs.Stat(path) +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. 
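// Illustrative sketch (not part of the vendored file): the Walk helpers
// documented above mirror filepath.Walk but operate on any afero.Fs, using
// Lstat where the filesystem supports it. An assumed minimal example over an
// in-memory filesystem:

package main // hypothetical example program

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "/data/a.txt", []byte("a"), 0644)
	afero.WriteFile(fs, "/data/sub/b.txt", []byte("b"), 0644)

	// Visit every file and directory under /data in lexical order.
	// Returning filepath.SkipDir from the callback works as in the stdlib.
	afero.Walk(fs, "/data", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
}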
+ +func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { + return Walk(a.Fs, root, walkFn) +} + +func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { + info, err := lstatIfPossible(fs, root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(fs, root, info, walkFn) +} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go new file mode 100644 index 000000000..c6376ec37 --- /dev/null +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -0,0 +1,80 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +var _ Lstater = (*ReadOnlyFs)(nil) + +type ReadOnlyFs struct { + source Fs +} + +func NewReadOnlyFs(source Fs) Fs { + return &ReadOnlyFs{source: source} +} + +func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { + return ReadDir(r.source, name) +} + +func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Name() string { + return "ReadOnlyFilter" +} + +func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { + return r.source.Stat(name) +} + +func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + if lsf, ok := r.source.(Lstater); ok { + return lsf.LstatIfPossible(name) + } + fi, err := r.Stat(name) + return fi, false, err +} + +func (r *ReadOnlyFs) Rename(o, n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) RemoveAll(p string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Remove(n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + return nil, syscall.EPERM + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *ReadOnlyFs) Open(n string) (File, error) { + return r.source.Open(n) +} + +func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Create(n string) (File, error) { + return nil, syscall.EPERM +} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go new file mode 100644 index 000000000..9d92dbc05 --- /dev/null +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -0,0 +1,214 @@ +package afero + +import ( + "os" + "regexp" + "syscall" + "time" +) + +// The RegexpFs filters files (not directories) by regular expression. Only +// files matching the given regexp will be allowed, all others get a ENOENT error ( +// "No such file or directory"). 
+// +type RegexpFs struct { + re *regexp.Regexp + source Fs +} + +func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { + return &RegexpFs{source: source, re: re} +} + +type RegexpFile struct { + f File + re *regexp.Regexp +} + +func (r *RegexpFs) matchesName(name string) error { + if r.re == nil { + return nil + } + if r.re.MatchString(name) { + return nil + } + return syscall.ENOENT +} + +func (r *RegexpFs) dirOrMatches(name string) error { + dir, err := IsDir(r.source, name) + if err != nil { + return err + } + if dir { + return nil + } + return r.matchesName(name) +} + +func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chtimes(name, a, m) +} + +func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chmod(name, mode) +} + +func (r *RegexpFs) Name() string { + return "RegexpFs" +} + +func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.Stat(name) +} + +func (r *RegexpFs) Rename(oldname, newname string) error { + dir, err := IsDir(r.source, oldname) + if err != nil { + return err + } + if dir { + return nil + } + if err := r.matchesName(oldname); err != nil { + return err + } + if err := r.matchesName(newname); err != nil { + return err + } + return r.source.Rename(oldname, newname) +} + +func (r *RegexpFs) RemoveAll(p string) error { + dir, err := IsDir(r.source, p) + if err != nil { + return err + } + if !dir { + if err := r.matchesName(p); err != nil { + return err + } + } + return r.source.RemoveAll(p) +} + +func (r *RegexpFs) Remove(name string) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Remove(name) +} + +func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *RegexpFs) Open(name string) (File, error) { + dir, err := IsDir(r.source, name) + if err != nil { + return nil, err + } + if !dir { + if err := r.matchesName(name); err != nil { + return nil, err + } + } + f, err := r.source.Open(name) + return &RegexpFile{f: f, re: r.re}, nil +} + +func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { + return r.source.Mkdir(n, p) +} + +func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { + return r.source.MkdirAll(n, p) +} + +func (r *RegexpFs) Create(name string) (File, error) { + if err := r.matchesName(name); err != nil { + return nil, err + } + return r.source.Create(name) +} + +func (f *RegexpFile) Close() error { + return f.f.Close() +} + +func (f *RegexpFile) Read(s []byte) (int, error) { + return f.f.Read(s) +} + +func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { + return f.f.ReadAt(s, o) +} + +func (f *RegexpFile) Seek(o int64, w int) (int64, error) { + return f.f.Seek(o, w) +} + +func (f *RegexpFile) Write(s []byte) (int, error) { + return f.f.Write(s) +} + +func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { + return f.f.WriteAt(s, o) +} + +func (f *RegexpFile) Name() string { + return f.f.Name() +} + +func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { + var rfi []os.FileInfo + rfi, err = f.f.Readdir(c) + if err != nil { + return nil, err + } + for _, i := range rfi { + if i.IsDir() || f.re.MatchString(i.Name()) { + fi = append(fi, i) + } + } + return fi, 
nil +} + +func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { + fi, err := f.Readdir(c) + if err != nil { + return nil, err + } + for _, s := range fi { + n = append(n, s.Name()) + } + return n, nil +} + +func (f *RegexpFile) Stat() (os.FileInfo, error) { + return f.f.Stat() +} + +func (f *RegexpFile) Sync() error { + return f.f.Sync() +} + +func (f *RegexpFile) Truncate(s int64) error { + return f.f.Truncate(s) +} + +func (f *RegexpFile) WriteString(s string) (int, error) { + return f.f.WriteString(s) +} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go new file mode 100644 index 000000000..abcf12d36 --- /dev/null +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -0,0 +1,316 @@ +package afero + +import ( + "io" + "os" + "path/filepath" + "syscall" +) + +// The UnionFile implements the afero.File interface and will be returned +// when reading a directory present at least in the overlay or opening a file +// for writing. +// +// The calls to +// Readdir() and Readdirnames() merge the file os.FileInfo / names from the +// base and the overlay - for files present in both layers, only those +// from the overlay will be used. +// +// When opening files for writing (Create() / OpenFile() with the right flags) +// the operations will be done in both layers, starting with the overlay. A +// successful read in the overlay will move the cursor position in the base layer +// by the number of bytes read. +type UnionFile struct { + Base File + Layer File + Merger DirsMerger + off int + files []os.FileInfo +} + +func (f *UnionFile) Close() error { + // first close base, so we have a newer timestamp in the overlay. If we'd close + // the overlay first, we'd get a cacheStale the next time we access this file + // -> cache would be useless ;-) + if f.Base != nil { + f.Base.Close() + } + if f.Layer != nil { + return f.Layer.Close() + } + return BADFD +} + +func (f *UnionFile) Read(s []byte) (int, error) { + if f.Layer != nil { + n, err := f.Layer.Read(s) + if (err == nil || err == io.EOF) && f.Base != nil { + // advance the file position also in the base file, the next + // call may be a write at this position (or a seek with SEEK_CUR) + if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + // only overwrite err in case the seek fails: we need to + // report an eventual io.EOF to the caller + err = seekErr + } + } + return n, err + } + if f.Base != nil { + return f.Base.Read(s) + } + return 0, BADFD +} + +func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { + if f.Layer != nil { + n, err := f.Layer.ReadAt(s, o) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + } + return n, err + } + if f.Base != nil { + return f.Base.ReadAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { + if f.Layer != nil { + pos, err = f.Layer.Seek(o, w) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o, w) + } + return pos, err + } + if f.Base != nil { + return f.Base.Seek(o, w) + } + return 0, BADFD +} + +func (f *UnionFile) Write(s []byte) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.Write(s) + if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
+ _, err = f.Base.Write(s) + } + return n, err + } + if f.Base != nil { + return f.Base.Write(s) + } + return 0, BADFD +} + +func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteAt(s, o) + if err == nil && f.Base != nil { + _, err = f.Base.WriteAt(s, o) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Name() string { + if f.Layer != nil { + return f.Layer.Name() + } + return f.Base.Name() +} + +// DirsMerger is how UnionFile weaves two directories together. +// It takes the FileInfo slices from the layer and the base and returns a +// single view. +type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) + +var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { + var files = make(map[string]os.FileInfo) + + for _, fi := range lofi { + files[fi.Name()] = fi + } + + for _, fi := range bofi { + if _, exists := files[fi.Name()]; !exists { + files[fi.Name()] = fi + } + } + + rfi := make([]os.FileInfo, len(files)) + + i := 0 + for _, fi := range files { + rfi[i] = fi + i++ + } + + return rfi, nil + +} + +// Readdir will weave the two directories together and +// return a single view of the overlayed directories +// At the end of the directory view, the error is io.EOF. +func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { + var merge DirsMerger = f.Merger + if merge == nil { + merge = defaultUnionMergeDirsFn + } + + if f.off == 0 { + var lfi []os.FileInfo + if f.Layer != nil { + lfi, err = f.Layer.Readdir(-1) + if err != nil { + return nil, err + } + } + + var bfi []os.FileInfo + if f.Base != nil { + bfi, err = f.Base.Readdir(-1) + if err != nil { + return nil, err + } + + } + merged, err := merge(lfi, bfi) + if err != nil { + return nil, err + } + f.files = append(f.files, merged...) 
+ } + + if f.off >= len(f.files) { + return nil, io.EOF + } + + if c == -1 { + return f.files[f.off:], nil + } + + if c > len(f.files) { + c = len(f.files) + } + + defer func() { f.off += c }() + return f.files[f.off:c], nil +} + +func (f *UnionFile) Readdirnames(c int) ([]string, error) { + rfi, err := f.Readdir(c) + if err != nil { + return nil, err + } + var names []string + for _, fi := range rfi { + names = append(names, fi.Name()) + } + return names, nil +} + +func (f *UnionFile) Stat() (os.FileInfo, error) { + if f.Layer != nil { + return f.Layer.Stat() + } + if f.Base != nil { + return f.Base.Stat() + } + return nil, BADFD +} + +func (f *UnionFile) Sync() (err error) { + if f.Layer != nil { + err = f.Layer.Sync() + if err == nil && f.Base != nil { + err = f.Base.Sync() + } + return err + } + if f.Base != nil { + return f.Base.Sync() + } + return BADFD +} + +func (f *UnionFile) Truncate(s int64) (err error) { + if f.Layer != nil { + err = f.Layer.Truncate(s) + if err == nil && f.Base != nil { + err = f.Base.Truncate(s) + } + return err + } + if f.Base != nil { + return f.Base.Truncate(s) + } + return BADFD +} + +func (f *UnionFile) WriteString(s string) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteString(s) + if err == nil && f.Base != nil { + _, err = f.Base.WriteString(s) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteString(s) + } + return 0, BADFD +} + +func copyToLayer(base Fs, layer Fs, name string) error { + bfh, err := base.Open(name) + if err != nil { + return err + } + defer bfh.Close() + + // First make sure the directory exists + exists, err := Exists(layer, filepath.Dir(name)) + if err != nil { + return err + } + if !exists { + err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? + if err != nil { + return err + } + } + + // Create the file on the overlay + lfh, err := layer.Create(name) + if err != nil { + return err + } + n, err := io.Copy(lfh, bfh) + if err != nil { + // If anything fails, clean up the file + layer.Remove(name) + lfh.Close() + return err + } + + bfi, err := bfh.Stat() + if err != nil || bfi.Size() != n { + layer.Remove(name) + lfh.Close() + return syscall.EIO + } + + err = lfh.Close() + if err != nil { + layer.Remove(name) + lfh.Close() + return err + } + return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) +} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go new file mode 100644 index 000000000..4f253f481 --- /dev/null +++ b/vendor/github.com/spf13/afero/util.go @@ -0,0 +1,330 @@ +// Copyright ©2015 Steve Francia +// Portions Copyright ©2015 The Hugo Authors +// Portions Copyright 2016-present Bjørn Erik Pedersen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "unicode" + + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// Filepath separator defined by os.Separator. 
+const FilePathSeparator = string(filepath.Separator) + +// Takes a reader and a path and writes the content +func (a Afero) WriteReader(path string, r io.Reader) (err error) { + return WriteReader(a.Fs, path, r) +} + +func WriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + if err != os.ErrExist { + return err + } + } + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +// Same as WriteReader but checks to see if file/directory already exists. +func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { + return SafeWriteReader(a.Fs, path, r) +} + +func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + return + } + } + + exists, err := Exists(fs, path) + if err != nil { + return + } + if exists { + return fmt.Errorf("%v already exists", path) + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +func (a Afero) GetTempDir(subPath string) string { + return GetTempDir(a.Fs, subPath) +} + +// GetTempDir returns the default temp directory with trailing slash +// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx +func GetTempDir(fs Fs, subPath string) string { + addSlash := func(p string) string { + if FilePathSeparator != p[len(p)-1:] { + p = p + FilePathSeparator + } + return p + } + dir := addSlash(os.TempDir()) + + if subPath != "" { + // preserve windows backslash :-( + if FilePathSeparator == "\\" { + subPath = strings.Replace(subPath, "\\", "____", -1) + } + dir = dir + UnicodeSanitize((subPath)) + if FilePathSeparator == "\\" { + dir = strings.Replace(dir, "____", "\\", -1) + } + + if exists, _ := Exists(fs, dir); exists { + return addSlash(dir) + } + + err := fs.MkdirAll(dir, 0777) + if err != nil { + panic(err) + } + dir = addSlash(dir) + } + return dir +} + +// Rewrite string to remove non-standard path characters +func UnicodeSanitize(s string) string { + source := []rune(s) + target := make([]rune, 0, len(source)) + + for _, r := range source { + if unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsMark(r) || + r == '.' || + r == '/' || + r == '\\' || + r == '_' || + r == '-' || + r == '%' || + r == ' ' || + r == '#' { + target = append(target, r) + } + } + + return string(target) +} + +// Transform characters with accents into plain forms. +func NeuterAccents(s string) string { + t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) + result, _, _ := transform.String(t, string(s)) + + return result +} + +func isMn(r rune) bool { + return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks +} + +func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { + return FileContainsBytes(a.Fs, filename, subslice) +} + +// Check if a file contains a specified byte slice. 
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { + f, err := fs.Open(filename) + if err != nil { + return false, err + } + defer f.Close() + + return readerContainsAny(f, subslice), nil +} + +func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { + return FileContainsAnyBytes(a.Fs, filename, subslices) +} + +// Check if a file contains any of the specified byte slices. +func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { + f, err := fs.Open(filename) + if err != nil { + return false, err + } + defer f.Close() + + return readerContainsAny(f, subslices...), nil +} + +// readerContains reports whether any of the subslices is within r. +func readerContainsAny(r io.Reader, subslices ...[]byte) bool { + + if r == nil || len(subslices) == 0 { + return false + } + + largestSlice := 0 + + for _, sl := range subslices { + if len(sl) > largestSlice { + largestSlice = len(sl) + } + } + + if largestSlice == 0 { + return false + } + + bufflen := largestSlice * 4 + halflen := bufflen / 2 + buff := make([]byte, bufflen) + var err error + var n, i int + + for { + i++ + if i == 1 { + n, err = io.ReadAtLeast(r, buff[:halflen], halflen) + } else { + if i != 2 { + // shift left to catch overlapping matches + copy(buff[:], buff[halflen:]) + } + n, err = io.ReadAtLeast(r, buff[halflen:], halflen) + } + + if n > 0 { + for _, sl := range subslices { + if bytes.Contains(buff, sl) { + return true + } + } + } + + if err != nil { + break + } + } + return false +} + +func (a Afero) DirExists(path string) (bool, error) { + return DirExists(a.Fs, path) +} + +// DirExists checks if a path exists and is a directory. +func DirExists(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err == nil && fi.IsDir() { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func (a Afero) IsDir(path string) (bool, error) { + return IsDir(a.Fs, path) +} + +// IsDir checks if a given path is a directory. +func IsDir(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + return fi.IsDir(), nil +} + +func (a Afero) IsEmpty(path string) (bool, error) { + return IsEmpty(a.Fs, path) +} + +// IsEmpty checks if a given file or directory is empty. +func IsEmpty(fs Fs, path string) (bool, error) { + if b, _ := Exists(fs, path); !b { + return false, fmt.Errorf("%q path does not exist", path) + } + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + if fi.IsDir() { + f, err := fs.Open(path) + if err != nil { + return false, err + } + defer f.Close() + list, err := f.Readdir(-1) + return len(list) == 0, nil + } + return fi.Size() == 0, nil +} + +func (a Afero) Exists(path string) (bool, error) { + return Exists(a.Fs, path) +} + +// Check if a file or directory exists. 
+func Exists(fs Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { + combinedPath := filepath.Join(basePathFs.path, relativePath) + if parent, ok := basePathFs.source.(*BasePathFs); ok { + return FullBaseFsPath(parent, combinedPath) + } + + return combinedPath +} diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore deleted file mode 100644 index 1b8c7c261..000000000 --- a/vendor/github.com/spf13/cobra/.gitignore +++ /dev/null @@ -1,36 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore -# swap -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -# session -Session.vim -# temporary -.netrwhist -*~ -# auto-generated tag files -tags - -*.exe - -cobra.test diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap deleted file mode 100644 index 94ec53068..000000000 --- a/vendor/github.com/spf13/cobra/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -Steve Francia -Bjørn Erik Pedersen -Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index 5afcb2096..000000000 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go - -matrix: - include: - - go: 1.9.4 - - go: 1.10.0 - - go: tip - allow_failures: - - go: tip - -before_install: - - mkdir -p bin - - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck - - chmod +x bin/shellcheck -script: - - PATH=$PATH:$PWD/bin go test -v ./... - - go build - - diff -u <(echo -n) <(gofmt -d -s .) - - if [ -z $NOVET ]; then - diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); - fi diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index 851fcc087..000000000 --- a/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,736 +0,0 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) - -Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. 
- -Many of the most widely used Go projects are built using Cobra including: - -* [Kubernetes](http://kubernetes.io/) -* [Hugo](http://gohugo.io) -* [rkt](https://github.com/coreos/rkt) -* [etcd](https://github.com/coreos/etcd) -* [Moby (former Docker)](https://github.com/moby/moby) -* [Docker (distribution)](https://github.com/docker/distribution) -* [OpenShift](https://www.openshift.com/) -* [Delve](https://github.com/derekparker/delve) -* [GopherJS](http://www.gopherjs.org/) -* [CockroachDB](http://www.cockroachlabs.com/) -* [Bleve](http://www.blevesearch.com/) -* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) -* [GiantSwarm's swarm](https://github.com/giantswarm/cli) -* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -* [rclone](http://rclone.org/) -* [nehm](https://github.com/bogem/nehm) -* [Pouch](https://github.com/alibaba/pouch) - -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) -[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) - -# Table of Contents - -- [Overview](#overview) -- [Concepts](#concepts) - * [Commands](#commands) - * [Flags](#flags) -- [Installing](#installing) -- [Getting Started](#getting-started) - * [Using the Cobra Generator](#using-the-cobra-generator) - * [Using the Cobra Library](#using-the-cobra-library) - * [Working with Flags](#working-with-flags) - * [Positional and Custom Arguments](#positional-and-custom-arguments) - * [Example](#example) - * [Help Command](#help-command) - * [Usage Message](#usage-message) - * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks) - * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) - * [Generating documentation for your command](#generating-documentation-for-your-command) - * [Generating bash completions](#generating-bash-completions) -- [Contributing](#contributing) -- [License](#license) - -# Overview - -Cobra is a library providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. - -Cobra is also an application that will generate your application scaffolding to rapidly -develop a Cobra-based application. - -Cobra provides: -* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. -* Fully POSIX-compliant flags (including short & long versions) -* Nested subcommands -* Global, local and cascading flags -* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname` -* Intelligent suggestions (`app srver`... did you mean `app server`?) -* Automatic help generation for commands and flags -* Automatic help flag recognition of `-h`, `--help`, etc. -* Automatically generated bash autocomplete for your application -* Automatically generated man pages for your application -* Command aliases so you can change things without breaking them -* The flexibility to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps - -# Concepts - -Cobra is built on a structure of commands, arguments & flags. - -**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. - -The best applications will read like sentences when used. 
Users will know how -to use the application because they will natively understand how to use it. - -The pattern to follow is -`APPNAME VERB NOUN --ADJECTIVE.` - or -`APPNAME COMMAND ARG --FLAG` - -A few good real world examples may better illustrate this point. - -In the following example, 'server' is a command, and 'port' is a flag: - - hugo server --port=1313 - -In this command we are telling Git to clone the url bare. - - git clone URL --bare - -## Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above, 'server' is the command. - -[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command) - -## Flags - -A flag is a way to modify the behavior of a command. Cobra supports -fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above, 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/spf13/pflag), a fork of the flag standard library -which maintains the same interface while adding POSIX compliance. - -# Installing -Using Cobra is easy. First, use `go get` to install the latest version -of the library. This command will install the `cobra` generator executable -along with the library and its dependencies: - - go get -u github.com/spf13/cobra/cobra - -Next, include Cobra in your application: - -```go -import "github.com/spf13/cobra" -``` - -# Getting Started - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "fmt" - "os" - - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. 
- -For example cmd/root.go: - -```go -import ( - "fmt" - "os" - - homedir "github.com/mitchellh/go-homedir" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func init() { - cobra.OnInitialize(initConfig) - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") - rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") -} - -func initConfig() { - // Don't forget to read config either from cfgFile or from home directory! - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := homedir.Dir() - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigName(".cobra") - } - - if err := viper.ReadInConfig(); err != nil { - fmt.Println("Can't read config:", err) - os.Exit(1) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. - -```go -package main - -import ( - "fmt" - "os" - - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. 
- -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - -```go -rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default Cobra only parses local flags on the target command, any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example the persistent flag `author` is bound with `viper`. -**Note**, that the variable `author` will not be set to the value from config, -when the `--author` flag is not provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field -of `Command`. - -The following validators are built in: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. -- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. -- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. -- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. - -An example of setting the custom validator: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("requires at least one arg") - } - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. 
- -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. -For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [# times] [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. - - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. 
- -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra --invalid - Error: unknown flag: --invalid - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
in the following formats: - -- [Markdown](doc/md_docs.md) -- [ReStructured Text](doc/rest_docs.md) -- [Man Page](doc/man_docs.md) - -## Generating bash completions - -Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). - -# Contributing - -1. Fork it -2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) -3. Create your feature branch (`git checkout -b my-new-feature`) -4. Make changes and add them (`git add .`) -5. Commit your changes (`git commit -m 'Add some feature'`) -6. Push to the branch (`git push origin my-new-feature`) -7. Create new pull request - -# License - -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index e79d4769d..000000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,221 +0,0 @@ -# Generating Bash Completions For Your Own cobra.Command - -Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: - -```go -package main - -import ( - "io/ioutil" - "os" - - "k8s.io/kubernetes/pkg/kubectl/cmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/util" -) - -func main() { - kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - kubectl.GenBashCompletionFile("out.sh") -} -``` - -`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. - -## Creating your own custom functions - -Some more actual code that works in kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. 
If you type `kubectl get pod [tab][tab]` the `__customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! - -## Have the completions code complete your 'nouns' - -In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like: - -```go -validArgs []string = { "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - err := RunGet(f, out, cmd, args) - util.CheckErr(err) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like - -```bash -# kubectl get [tab][tab] -node pod replicationcontroller service -``` - -## Plural form and shortcuts for nouns - -If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -# kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns -in this example again instead of the replication controllers. - -## Mark flags as required - -Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy. - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -# kubectl exec [tab][tab][tab] --c --container= -p --pod= -``` - -# Specify valid filename extensions for flags that take a filename - -In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. - -```go - annotations := []string{"json", "yaml", "yml"} - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = annotations - - flag := &pflag.Flag{ - Name: "filename", - Shorthand: "f", - Usage: usage, - Value: value, - DefValue: value.String(), - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -Now when you run a command with this filename flag you'll get something like - -```bash -# kubectl create -f -test/ example/ rpmbuild/ -hello.yml test.json -``` - -So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. 
- -# Specify custom flag completion - -Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify -a custom flag completion function with cobra.BashCompCustom: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__handle_namespace_flag` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` -# Using bash aliases for commands - -You can also configure the `bash aliases` for the commands and they will also support completions. - -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$) aliasname -completion firstcommand secondcommand -``` diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go new file mode 100644 index 000000000..bc22e9732 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go @@ -0,0 +1,683 @@ +package cmd + +func initAgpl() { + Licenses["agpl"] = License{ + Name: "GNU Affero General Public License", + PossibleMatches: []string{"agpl", "affero gpl", "gnu agpl"}, + Header: ` +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see .`, + Text: ` GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<http://www.gnu.org/licenses/>. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go new file mode 100644 index 000000000..38393d541 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go @@ -0,0 +1,238 @@ +// Copyright © 2015 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initApache2() { + Licenses["apache"] = License{ + Name: "Apache 2.0", + PossibleMatches: []string{"apache", "apache20", "apache 2.0", "apache2.0", "apache-2.0"}, + Header: ` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.`, + Text: ` + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go new file mode 100644 index 000000000..4a847e04a --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go @@ -0,0 +1,71 @@ +// Copyright © 2015 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initBsdClause2() { + Licenses["freebsd"] = License{ + Name: "Simplified BSD License", + PossibleMatches: []string{"freebsd", "simpbsd", "simple bsd", "2-clause bsd", + "2 clause bsd", "simplified bsd license"}, + Header: `All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.`, + Text: `{{ .copyright }} +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go new file mode 100644 index 000000000..c7476b31f --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go @@ -0,0 +1,78 @@ +// Copyright © 2015 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initBsdClause3() { + Licenses["bsd"] = License{ + Name: "NewBSD", + PossibleMatches: []string{"bsd", "newbsd", "3 clause bsd", "3-clause bsd"}, + Header: `All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.`, + Text: `{{ .copyright }} +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go new file mode 100644 index 000000000..03e05b3a7 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go @@ -0,0 +1,376 @@ +// Copyright © 2015 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initGpl2() { + Licenses["gpl2"] = License{ + Name: "GNU General Public License 2.0", + PossibleMatches: []string{"gpl2", "gnu gpl2", "gplv2"}, + Header: ` +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with this program. If not, see <http://www.gnu.org/licenses/>.`, + Text: ` GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things.
+ + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) 
+ +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type 'show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type 'show c' for details. + +The hypothetical commands 'show w' and 'show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than 'show w' and 'show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + 'Gnomovision' (which makes passes at compilers) written by James Hacker. + + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License.
+`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go new file mode 100644 index 000000000..ce07679c7 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go @@ -0,0 +1,711 @@ +// Copyright © 2015 Steve Francia <spf@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initGpl3() { + Licenses["gpl3"] = License{ + Name: "GNU General Public License 3.0", + PossibleMatches: []string{"gpl3", "gplv3", "gpl", "gnu gpl3", "gnu gpl"}, + Header: ` +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see <http://www.gnu.org/licenses/>.`, + Text: ` GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code.
And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type 'show c' for details. + +The hypothetical commands 'show w' and 'show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go new file mode 100644 index 000000000..0f8b96cad --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go @@ -0,0 +1,186 @@ +package cmd + +func initLgpl() { + Licenses["lgpl"] = License{ + Name: "GNU Lesser General Public License", + PossibleMatches: []string{"lgpl", "lesser gpl", "gnu lgpl"}, + Header: ` +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with this program. 
If not, see .`, + Text: ` GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. 
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library.`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go new file mode 100644 index 000000000..bd2d0c4fa --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go @@ -0,0 +1,63 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initMit() { + Licenses["mit"] = License{ + Name: "MIT License", + PossibleMatches: []string{"mit"}, + Header: ` +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE.`, + Text: `The MIT License (MIT) + +{{ .copyright }} + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go b/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go new file mode 100644 index 000000000..a070134dd --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go @@ -0,0 +1,118 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +import ( + "strings" + "time" + + "github.com/spf13/viper" +) + +// Licenses contains all possible licenses a user can choose from. +var Licenses = make(map[string]License) + +// License represents a software license agreement, containing the Name of +// the license, its possible matches (on the command line as given to cobra), +// the header to be used with each file upon the file's creation, and the text +// of the license. +type License struct { + Name string // The type of license in use + PossibleMatches []string // Similar names to guess + Text string // License text data + Header string // License header for source files +} + +func init() { + // Allows a user to not use a license. + Licenses["none"] = License{"None", []string{"none", "false"}, "", ""} + + initApache2() + initMit() + initBsdClause3() + initBsdClause2() + initGpl2() + initGpl3() + initLgpl() + initAgpl() +} + +// getLicense returns the license specified by the user via flag or config. +// If the user didn't specify a license, it returns Apache License 2.0. +// +// TODO: Inspect project for existing license +func getLicense() License { + // If explicitly flagged, use that.
+ if userLicense != "" { + return findLicense(userLicense) + } + + // If the user wants a custom license, use that. + if viper.IsSet("license.header") || viper.IsSet("license.text") { + return License{Header: viper.GetString("license.header"), + Text: viper.GetString("license.text")} + } + + // If the user wants a built-in license, use that. + if viper.IsSet("license") { + return findLicense(viper.GetString("license")) + } + + // If the user didn't set any license, use Apache 2.0 by default. + return Licenses["apache"] +} + +func copyrightLine() string { + author := viper.GetString("author") + + year := viper.GetString("year") // For tests. + if year == "" { + year = time.Now().Format("2006") + } + + return "Copyright © " + year + " " + author +} + +// findLicense looks up the License object for a built-in license. +// If it cannot find the license, the app will be terminated and an +// error will be printed. +func findLicense(name string) License { + found := matchLicense(name) + if found == "" { + er("unknown license: " + name) + } + return Licenses[found] +} + +// matchLicense compares the given license name +// to the PossibleMatches of all built-in licenses. +// It returns a blank string if name is blank or no +// appropriate match is found. +func matchLicense(name string) string { + if name == "" { + return "" + } + + for key, lic := range Licenses { + for _, match := range lic.PossibleMatches { + if strings.EqualFold(name, match) { + return key + } + } + } + + return "" +} diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore deleted file mode 100644 index c3da29013..000000000 --- a/vendor/github.com/spf13/pflag/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.idea/* - diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml deleted file mode 100644 index f8a63b308..000000000 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false - -language: go - -go: - - 1.7.3 - - 1.8.1 - - tip - -matrix: - allow_failures: - - go: tip - -install: - - go get github.com/golang/lint/golint - - export PATH=$GOPATH/bin:$PATH - - go install ./... - -script: - - verify/all.sh -v - - go test ./... diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index b052414d1..000000000 --- a/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,296 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag) -[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command.
- -Install by running: - - go get github.com/spf13/pflag - -Run tests by running: - - go test github.com/spf13/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/spf13/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. - -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. -If you have a pflag.FlagSet with a flag called 'flagname' of type int you -can use GetInt() to get the int value. But notice that 'flagname' must exist -and it must be an int. GetString("flagname") will fail. - -``` go -i, err := flagset.GetInt("flagname") -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") -} -flag.VarP(&flagVal, "varname", "v", "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Setting no option default values for flags - -After you create a flag it is possible to set the pflag.NoOptDefVal for -the given flag. Doing this changes the meaning of the flag slightly. If -a flag has a NoOptDefVal and the flag is set on the command line without -an option the flag will be set to the NoOptDefVal. 
For example given: - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -flag.Lookup("flagname").NoOptDefVal = "4321" -``` - -Would result in something like - -| Parsed Arguments | Resulting Value | -| ------------- | ------------- | -| --flagname=1357 | ip=1357 | -| --flagname | ip=4321 | -| [nothing] | ip=1234 | - -## Command line flag syntax - -``` ---flag // boolean flags, or flags with no option default values ---flag x // only on flags without a default value ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags -or a flag with a default value - -``` -// boolean or flags where the 'no option default value' is set --f --f=true --abc -but --b true is INVALID - -// non-boolean and flags without a 'no option default value' --n 1234 --n=1234 --n1234 - -// mixed --abcs "hello" --absd="hello" --abcs1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag - -``` go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name - -``` go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## Deprecating a flag or its shorthand -It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. - -**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. -```go -// deprecate a flag by specifying its name and a usage message -flags.MarkDeprecated("badflag", "please use --good-flag instead") -``` -This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. - -**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". 
-```go -// deprecate a flag shorthand by specifying its flag name and a usage message -flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") -``` -This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. - -Note that usage message is essential here, and it should not be empty. - -## Hidden flags -It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. - -**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. -```go -// hide a flag by specifying its name -flags.MarkHidden("secretFlag") -``` - -## Disable sorting of flags -`pflag` allows you to disable sorting of flags for help and usage message. - -**Example**: -```go -flags.BoolP("verbose", "v", false, "verbose output") -flags.String("coolflag", "yeaah", "it's really cool flag") -flags.Int("usefulflag", 777, "sometimes it's very useful") -flags.SortFlags = false -flags.PrintDefaults() -``` -**Output**: -``` - -v, --verbose verbose output - --coolflag string it's really cool flag (default "yeaah") - --usefulflag int sometimes it's very useful (default 777) -``` - - -## Supporting Go flags when using pflag -In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary -to support flags defined by third-party dependencies (e.g. `golang/glog`). - -**Example**: You want to add the Go flags to the `CommandLine` flagset -```go -import ( - goflag "flag" - flag "github.com/spf13/pflag" -) - -var ip *int = flag.Int("flagname", 1234, "help message for flagname") - -func main() { - flag.CommandLine.AddGoFlagSet(goflag.CommandLine) - flag.Parse() -} -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/spf13/pflag][2] after -installation. - -[2]: http://localhost:6060/pkg/github.com/spf13/pflag -[3]: http://godoc.org/github.com/spf13/pflag diff --git a/vendor/github.com/stretchr/objx/LICENSE b/vendor/github.com/stretchr/objx/LICENSE new file mode 100644 index 000000000..44d4d9d5a --- /dev/null +++ b/vendor/github.com/stretchr/objx/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Stretchr, Inc. +Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go new file mode 100644 index 000000000..204356a22 --- /dev/null +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -0,0 +1,148 @@ +package objx + +import ( + "regexp" + "strconv" + "strings" +) + +// arrayAccesRegexString is the regex used to extract the array number +// from the access path +const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + +// arrayAccesRegex is the compiled arrayAccesRegexString +var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) + +// Get gets the value using the specified selector and +// returns it inside a new Obj object. +// +// If it cannot find the value, Get will return a nil +// value inside an instance of Obj. +// +// Get can only operate directly on map[string]interface{} and []interface. +// +// Example +// +// To access the title of the third chapter of the second book, do: +// +// o.Get("books[1].chapters[2].title") +func (m Map) Get(selector string) *Value { + rawObj := access(m, selector, nil, false) + return &Value{data: rawObj} +} + +// Set sets the value using the specified selector and +// returns the object on which Set was called. +// +// Set can only operate directly on map[string]interface{} and []interface +// +// Example +// +// To set the title of the third chapter of the second book, do: +// +// o.Set("books[1].chapters[2].title","Time to Go") +func (m Map) Set(selector string, value interface{}) Map { + access(m, selector, value, true) + return m +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current, selector, value interface{}, isSet bool) interface{} { + switch selector.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + if array, ok := current.([]interface{}); ok { + index := intFromInterface(selector) + if index >= len(array) { + return nil + } + return array[index] + } + return nil + + case string: + selStr := selector.(string) + selSegs := strings.SplitN(selStr, PathSeparator, 2) + thisSel := selSegs[0] + index := -1 + var err error + + if strings.Contains(thisSel, "[") { + arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) + if len(arrayMatches) > 0 { + // Get the key into the map + thisSel = arrayMatches[1] + + // Get the index into the array at the key + index, err = strconv.Atoi(arrayMatches[2]) + + if err != nil { + // This should never happen. If it does, something has gone + // seriously wrong. Panic. + panic("objx: Array index is not an integer. Must use array[int].") + } + } + } + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if len(selSegs) <= 1 && isSet { + curMSI[thisSel] = value + return nil + } + current = curMSI[thisSel] + default: + current = nil + } + // do we need to access the item of an array? 
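+ // e.g. for the selector "books[1]", the map lookup above resolved the
+ // value stored under "books"; the index parsed from "[1]" is applied to
+ // that value here, yielding nil when it is not a []interface{} or the
+ // index is out of range.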
+ if index > -1 { + if array, ok := current.([]interface{}); ok { + if index < len(array) { + current = array[index] + } else { + current = nil + } + } + } + if len(selSegs) > 1 { + current = access(current, selSegs[1], value, isSet) + } + } + return current +} + +// intFromInterface converts an interface object to the largest +// representation of an unsigned integer using a type switch and +// assertions +func intFromInterface(selector interface{}) int { + var value int + switch selector.(type) { + case int: + value = selector.(int) + case int8: + value = int(selector.(int8)) + case int16: + value = int(selector.(int16)) + case int32: + value = int(selector.(int32)) + case int64: + value = int(selector.(int64)) + case uint: + value = int(selector.(uint)) + case uint8: + value = int(selector.(uint8)) + case uint16: + value = int(selector.(uint16)) + case uint32: + value = int(selector.(uint32)) + case uint64: + value = int(selector.(uint64)) + default: + return 0 + } + return value +} diff --git a/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/stretchr/objx/constants.go new file mode 100644 index 000000000..f9eb42a25 --- /dev/null +++ b/vendor/github.com/stretchr/objx/constants.go @@ -0,0 +1,13 @@ +package objx + +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // SignatureSeparator is the character that is used to + // separate the Base64 string from the security signature. + SignatureSeparator = "_" +) diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go new file mode 100644 index 000000000..5e020f310 --- /dev/null +++ b/vendor/github.com/stretchr/objx/conversions.go @@ -0,0 +1,108 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" +) + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + result, err := json.Marshal(m) + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + return string(result), err +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + _, err = encoder.Write([]byte(jsonData)) + if err != nil { + return "", err + } + _ = encoder.Close() + + return buf.String(), nil +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. 
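+//
+// The returned string has the form "<Base64 data>" + SignatureSeparator +
+// HashWithKey(<Base64 data>, key); FromSignedBase64 in map.go recomputes
+// that signature and rejects the string if it does not match.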
+func (m Map) SignedBase64(key string) (string, error) { + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + return base64 + SignatureSeparator + sig, nil +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + vals := make(url.Values) + for k, v := range m { + //TODO: can this be done without sprintf? + vals.Set(k, fmt.Sprintf("%v", v)) + } + return vals +} + +// URLQuery gets an encoded URL query representing the given +// Obj. This function requires that the wrapped object be a +// map[string]interface{} +func (m Map) URLQuery() (string, error) { + return m.URLValues().Encode(), nil +} diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go new file mode 100644 index 000000000..6d6af1a83 --- /dev/null +++ b/vendor/github.com/stretchr/objx/doc.go @@ -0,0 +1,66 @@ +/* +Objx - Go package for dealing with maps, slices, JSON and other data. + +Overview + +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +a powerful `Get` method (among others) that allows you to easily and quickly get +access to data within the map, without having to worry too much about type assertions, +missing data, default values etc. + +Pattern + +Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. +Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +the rest will be optimistic and try to figure things out without panicking. + +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, +or you can be explicit about the default value. + + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, +manipulating and selecting that data. You can find out more by exploring the index below. + +Reading data + +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +Ranging + +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. +For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... 
+ } +*/ +package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go new file mode 100644 index 000000000..406bc8926 --- /dev/null +++ b/vendor/github.com/stretchr/objx/map.go @@ -0,0 +1,190 @@ +package objx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strings" +) + +// MSIConvertable is an interface that defines methods for converting your +// custom types to a map[string]interface{} representation. +type MSIConvertable interface { + // MSI gets a map[string]interface{} (msi) representing the + // object. + MSI() map[string]interface{} +} + +// Map provides extended functionality for working with +// untyped data, in particular map[string]interface (msi). +type Map map[string]interface{} + +// Value returns the internal value instance +func (m Map) Value() *Value { + return &Value{data: m} +} + +// Nil represents a nil Map. +var Nil = New(nil) + +// New creates a new Map containing the map[string]interface{} in the data argument. +// If the data argument is not a map[string]interface, New attempts to call the +// MSI() method on the MSIConvertable interface to create one. +func New(data interface{}) Map { + if _, ok := data.(map[string]interface{}); !ok { + if converter, ok := data.(MSIConvertable); ok { + data = converter.MSI() + } else { + return nil + } + } + return Map(data.(map[string]interface{})) +} + +// MSI creates a map[string]interface{} and puts it inside a new Map. +// +// The arguments follow a key, value pattern. +// +// +// Returns nil if any key argument is non-string or if there are an odd number of arguments. +// +// Example +// +// To easily create Maps: +// +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// +// // creates an Map equivalent to +// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} +func MSI(keyAndValuePairs ...interface{}) Map { + newMap := Map{} + keyAndValuePairsLen := len(keyAndValuePairs) + if keyAndValuePairsLen%2 != 0 { + return nil + } + for i := 0; i < keyAndValuePairsLen; i = i + 2 { + key := keyAndValuePairs[i] + value := keyAndValuePairs[i+1] + + // make sure the key is a string + keyString, keyStringOK := key.(string) + if !keyStringOK { + return nil + } + newMap[keyString] = value + } + return newMap +} + +// ****** Conversion Constructors + +// MustFromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Panics if the JSON is invalid. +func MustFromJSON(jsonString string) Map { + o, err := FromJSON(jsonString) + if err != nil { + panic("objx: MustFromJSON failed with error: " + err.Error()) + } + return o +} + +// FromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Returns an error if the JSON is invalid. +func FromJSON(jsonString string) (Map, error) { + var data interface{} + err := json.Unmarshal([]byte(jsonString), &data) + if err != nil { + return Nil, err + } + return New(data), nil +} + +// FromBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by Base64 +func FromBase64(base64String string) (Map, error) { + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) + decoded, err := ioutil.ReadAll(decoder) + if err != nil { + return nil, err + } + return FromJSON(string(decoded)) +} + +// MustFromBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. 
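+//
+// A minimal round-trip sketch (names and values are only illustrative):
+//
+//   m := objx.MustFromBase64(objx.MSI("name", "Mat").MustBase64())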
+// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + result, err := FromBase64(base64String) + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match") + } + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + result, err := FromSignedBase64(base64String, key) + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +func FromURLQuery(query string) (Map, error) { + vals, err := url.ParseQuery(query) + if err != nil { + return nil, err + } + m := Map{} + for k, vals := range vals { + m[k] = vals[0] + } + return m, nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + o, err := FromURLQuery(query) + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + return o +} diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go new file mode 100644 index 000000000..c3400a3f7 --- /dev/null +++ b/vendor/github.com/stretchr/objx/mutations.go @@ -0,0 +1,77 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. +func (m Map) Exclude(exclude []string) Map { + excluded := make(Map) + for k, v := range m { + if !contains(exclude, k) { + excluded[k] = v + } + } + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := Map{} + for k, v := range m { + copied[k] = v + } + return copied +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// MergeHere blends the specified map with this map and returns the current map. +// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + for k, v := range merge { + m[k] = v + } + return m +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. 
This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := Map{} + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return newMap +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + if newKey, ok := mapping[key]; ok { + return newKey, value + } + return key, value + }) +} + +// Checks if a string slice contains a string +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go new file mode 100644 index 000000000..692be8e2a --- /dev/null +++ b/vendor/github.com/stretchr/objx/security.go @@ -0,0 +1,12 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security key +func HashWithKey(data, key string) string { + d := sha1.Sum([]byte(data + ":" + key)) + return hex.EncodeToString(d[:]) +} diff --git a/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/stretchr/objx/tests.go new file mode 100644 index 000000000..d9e0b479a --- /dev/null +++ b/vendor/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. +func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 000000000..202a91f8c --- /dev/null +++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2501 @@ +package objx + +/* + Inter (interface{} and []interface{}) +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. +func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. +func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. +// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. 
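+//
+// Since any non-nil value satisfies interface{}, this effectively reports
+// whether the underlying data is non-nil.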
+func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. +func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + var selected []interface{} + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]interface{}. +func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + groups := make(map[string][]interface{}) + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. +func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + MSI (map[string]interface{} and []map[string]interface{}) +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. 
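+//
+// Note that JSON decoding produces []interface{} rather than
+// []map[string]interface{}, so values read from JSON will not match this
+// accessor.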
+func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + var selected []map[string]interface{} + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. +func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + groups := make(map[string][]map[string]interface{}) + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([](Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. +func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + var selected [](Map) + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + groups := make(map[string][](Map)) + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. 
+func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Bool (bool and []bool) +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. +func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. +func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. +func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. +func (v *Value) EachBool(callback func(int, bool) bool) *Value { + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + var selected []bool + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. +func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + groups := make(map[string][]bool) + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. The data in the returned result will be a +// []bool containing the replaced items. 
+func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Str (string and []string) +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. +// +// Panics if the object is not a string. +func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. +func (v *Value) EachStr(callback func(int, string) bool) *Value { + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + var selected []string + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. +func (v *Value) GroupStr(grouper func(int, string) string) *Value { + groups := make(map[string][]string) + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. 
+func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int (int and []int) +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. +func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt(callback func(int, int) bool) *Value { + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt uses the specified decider function to select items +// from the []int. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + var selected []int + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. +func (v *Value) GroupInt(grouper func(int, int) string) *Value { + groups := make(map[string][]int) + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. 
+func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. +func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + var selected []int8 + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int8. +func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + groups := make(map[string][]int8) + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. 
+func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. +func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. +func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + var selected []int16 + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. +func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + groups := make(map[string][]int16) + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. 
+func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. +func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. +func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + var selected []int32 + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. +func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + groups := make(map[string][]int32) + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. 
+func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. +func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. +func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + var selected []int64 + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. +func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + groups := make(map[string][]int64) + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. 
+func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint (uint and []uint) +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. +func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. +func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. +func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint(callback func(int, uint) bool) *Value { + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint uses the specified decider function to select items +// from the []uint. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + var selected []uint + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint. +func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + groups := make(map[string][]uint) + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. 
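The generated helpers above repeat one pattern for every numeric type: Is*/Must* type guards, an Each* iterator, and Where/Group/Replace/Collect transforms that wrap their result in a new *Value. A minimal usage sketch of that pattern for the int64 flavour follows; it assumes the objx.Map type and its Get selector from other files of the vendored package, and the "ids" key and values are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// Wrap a []int64 behind an objx Value (Map and Get live in other files
	// of the vendored package; the data here is illustrative).
	m := objx.Map{"ids": []int64{3, 14, 15, 92}}
	v := m.Get("ids")

	// Type guard plus the panicking accessor from this hunk.
	if v.IsInt64Slice() {
		fmt.Println("len:", len(v.MustInt64Slice()))
	}

	// EachInt64 iterates until the callback returns false.
	v.EachInt64(func(i int, n int64) bool {
		fmt.Printf("ids[%d] = %d\n", i, n)
		return true
	})

	// GroupInt64 buckets values under the string returned by the grouper.
	grouped := v.GroupInt64(func(_ int, n int64) string {
		if n%2 == 0 {
			return "even"
		}
		return "odd"
	})
	fmt.Println(grouped.Data()) // map[string][]int64

	// CollectInt64 maps every element to an interface{} result.
	doubled := v.CollectInt64(func(_ int, n int64) interface{} { return n * 2 })
	fmt.Println(doubled.Data()) // []interface{} holding 6, 28, 30, 184

	// A wrong or missing type falls back to the optional default.
	fmt.Println(m.Get("missing").Int64(7)) // 7
}
```

The same sketch carries over unchanged to the uint, uintptr, float, and complex variants generated in this file.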
+func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. +func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. +func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. +func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + var selected []uint8 + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. +func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + groups := make(map[string][]uint8) + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. 
+func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. +func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. +func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. +func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + var selected []uint16 + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint16. +func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + groups := make(map[string][]uint16) + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. 
The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. +func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. +func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. +func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + var selected []uint32 + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. 
+func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + groups := make(map[string][]uint32) + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. +func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + var selected []uint64 + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. 
The object contained in the +// result will contain a map[string][]uint64. +func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + groups := make(map[string][]uint64) + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. +func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. +func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. +func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + var selected []uintptr + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. +func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + groups := make(map[string][]uintptr) + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. +func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. +func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. +func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + var selected []float32 + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. +func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + groups := make(map[string][]float32) + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. +func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. +func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. 
+func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + var selected []float64 + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. +func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + groups := make(map[string][]float64) + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. +func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. +func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. +// +// Panics if the object is not a []complex64. 
+func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + var selected []complex64 + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. +func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + groups := make(map[string][]complex64) + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. +func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. 
+func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. +func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + var selected []complex128 + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + groups := make(map[string][]complex128) + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go new file mode 100644 index 000000000..e4b4a1433 --- /dev/null +++ b/vendor/github.com/stretchr/objx/value.go @@ -0,0 +1,53 @@ +package objx + +import ( + "fmt" + "strconv" +) + +// Value provides methods for extracting interface{} data in various +// types. 
+type Value struct { + // data contains the raw data being managed by this Value + data interface{} +} + +// Data returns the raw data contained by this Value +func (v *Value) Data() interface{} { + return v.data +} + +// String returns the value always as a string +func (v *Value) String() string { + switch { + case v.IsStr(): + return v.Str() + case v.IsBool(): + return strconv.FormatBool(v.Bool()) + case v.IsFloat32(): + return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32) + case v.IsFloat64(): + return strconv.FormatFloat(v.Float64(), 'f', -1, 64) + case v.IsInt(): + return strconv.FormatInt(int64(v.Int()), 10) + case v.IsInt8(): + return strconv.FormatInt(int64(v.Int8()), 10) + case v.IsInt16(): + return strconv.FormatInt(int64(v.Int16()), 10) + case v.IsInt32(): + return strconv.FormatInt(int64(v.Int32()), 10) + case v.IsInt64(): + return strconv.FormatInt(v.Int64(), 10) + case v.IsUint(): + return strconv.FormatUint(uint64(v.Uint()), 10) + case v.IsUint8(): + return strconv.FormatUint(uint64(v.Uint8()), 10) + case v.IsUint16(): + return strconv.FormatUint(uint64(v.Uint16()), 10) + case v.IsUint32(): + return strconv.FormatUint(uint64(v.Uint32()), 10) + case v.IsUint64(): + return strconv.FormatUint(v.Uint64(), 10) + } + return fmt.Sprintf("%#v", v.Data()) +} diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE new file mode 100644 index 000000000..f38ec5956 --- /dev/null +++ b/vendor/github.com/stretchr/testify/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go new file mode 100644 index 000000000..aa1c2b95c --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -0,0 +1,484 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Conditionf uses a Comparison to assert a complex condition. +func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Condition(t, comp, append([]interface{}{msg}, args...)...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. 
+// +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Contains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return DirExists(t, path, append([]interface{}{msg}, args...)...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Emptyf(t, obj, "error message %s", "formatted") +func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Empty(t, object, append([]interface{}{msg}, args...)...) +} + +// Equalf asserts that two objects are equal. +// +// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). 
+// +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Error(t, err, append([]interface{}{msg}, args...)...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Failf reports a failure through +func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// FailNowf fails test +func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// Falsef asserts that the specified value is false. +// +// assert.Falsef(t, myBool, "error message %s", "formatted") +func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return False(t, value, append([]interface{}{msg}, args...)...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return FileExists(t, path, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). 
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. 
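The HTTP helpers above take the handler value directly and exercise it in-process, so no server needs to be listening. A short sketch of how the *f variants are typically called; healthHandler, the route, and the messages are hypothetical and only for illustration.

```go
package example_test

import (
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// healthHandler is a hypothetical handler used only for this sketch.
func healthHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write([]byte("ok"))
}

func TestHealthEndpoint(t *testing.T) {
	// The handler is invoked in-process with the given method, URL and
	// query values; the trailing format string and args customise the
	// failure message, as in the other *f helpers.
	var noQuery url.Values
	assert.HTTPSuccessf(t, healthHandler, "GET", "/healthz", noQuery,
		"health check failed for %s", "api")
	assert.HTTPBodyContainsf(t, healthHandler, "GET", "/healthz", noQuery, "ok",
		"unexpected body for %s", "api")
}
```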
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// IsTypef asserts that the specified objects are of the same type. +func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Len(t, object, length, append([]interface{}{msg}, args...)...) +} + +// Nilf asserts that the specified object is nil. +// +// assert.Nilf(t, err, "error message %s", "formatted") +func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Nil(t, object, append([]interface{}{msg}, args...)...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NoError(t, err, append([]interface{}{msg}, args...)...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotEmpty(t, object, append([]interface{}{msg}, args...)...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). 
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// NotNilf asserts that the specified object is not nil. +// +// assert.NotNilf(t, err, "error message %s", "formatted") +func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotNil(t, object, append([]interface{}{msg}, args...)...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotPanics(t, f, append([]interface{}{msg}, args...)...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// NotZerof asserts that i is not the zero value for its type. +func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotZero(t, i, append([]interface{}{msg}, args...)...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Panics(t, f, append([]interface{}{msg}, args...)...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) 
+} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Subset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// Truef asserts that the specified value is true. +// +// assert.Truef(t, myBool, "error message %s", "formatted") +func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return True(t, value, append([]interface{}{msg}, args...)...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// Zerof asserts that i is the zero value for its type. +func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Zero(t, i, append([]interface{}{msg}, args...)...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go new file mode 100644 index 000000000..de39f794e --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -0,0 +1,956 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Condition(a.t, comp, msgAndArgs...) +} + +// Conditionf uses a Comparison to assert a complex condition. +func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Conditionf(a.t, comp, msg, args...) +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Contains(a.t, s, contains, msgAndArgs...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Containsf(a.t, s, contains, msg, args...) 
+} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return DirExists(a.t, path, msgAndArgs...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return DirExistsf(a.t, path, msg, args...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) +func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ElementsMatchf(a.t, listA, listB, msg, args...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Empty(a.t, object, msgAndArgs...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Emptyf(obj, "error message %s", "formatted") +func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Emptyf(a.t, object, msg, args...) +} + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Equal(a.t, expected, actual, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString) +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualError(a.t, theError, errString, msgAndArgs...) +} + +// EqualErrorf asserts that a function returned an error (i.e. 
not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualErrorf(a.t, theError, errString, msg, args...) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123)) +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualValues(a.t, expected, actual, msgAndArgs...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) +func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualValuesf(a.t, expected, actual, msg, args...) +} + +// Equalf asserts that two objects are equal. +// +// a.Equalf(123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Equalf(a.t, expected, actual, msg, args...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err) { +// assert.Equal(t, expectedError, err) +// } +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Error(a.t, err, msgAndArgs...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Errorf(a.t, err, msg, args...) +} + +// Exactly asserts that two objects are equal in value and type. +// +// a.Exactly(int32(123), int64(123)) +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Exactly(a.t, expected, actual, msgAndArgs...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) +func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Exactlyf(a.t, expected, actual, msg, args...) +} + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Fail(a.t, failureMessage, msgAndArgs...) 
+} + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FailNow(a.t, failureMessage, msgAndArgs...) +} + +// FailNowf fails test +func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FailNowf(a.t, failureMessage, msg, args...) +} + +// Failf reports a failure through +func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Failf(a.t, failureMessage, msg, args...) +} + +// False asserts that the specified value is false. +// +// a.False(myBool) +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return False(a.t, value, msgAndArgs...) +} + +// Falsef asserts that the specified value is false. +// +// a.Falsef(myBool, "error message %s", "formatted") +func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Falsef(a.t, value, msg, args...) +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FileExists(a.t, path, msgAndArgs...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FileExistsf(a.t, path, msg, args...) +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPError(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPErrorf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) 
+} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPSuccessf(a.t, handler, method, url, values, msg, args...) +} + +// Implements asserts that an object is implemented by the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Implements(a.t, interfaceObject, object, msgAndArgs...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Implementsf(a.t, interfaceObject, object, msg, args...) +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaf(a.t, expected, actual, delta, msg, args...) 
+} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) +} + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsType(a.t, expectedType, object, msgAndArgs...) +} + +// IsTypef asserts that the specified objects are of the same type. +func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsTypef(a.t, expectedType, object, msg, args...) +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return JSONEq(a.t, expected, actual, msgAndArgs...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return JSONEqf(a.t, expected, actual, msg, args...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3) +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Len(a.t, object, length, msgAndArgs...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// a.Lenf(mySlice, 3, "error message %s", "formatted") +func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Lenf(a.t, object, length, msg, args...) +} + +// Nil asserts that the specified object is nil. 
+// +// a.Nil(err) +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Nil(a.t, object, msgAndArgs...) +} + +// Nilf asserts that the specified object is nil. +// +// a.Nilf(err, "error message %s", "formatted") +func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Nilf(a.t, object, msg, args...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoError(a.t, err, msgAndArgs...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoErrorf(a.t, err, msg, args...) +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotContains(a.t, s, contains, msgAndArgs...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotContainsf(a.t, s, contains, msg, args...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEmpty(a.t, object, msgAndArgs...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEmptyf(a.t, object, msg, args...) +} + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). 
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEqual(a.t, expected, actual, msgAndArgs...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEqualf(a.t, expected, actual, msg, args...) +} + +// NotNil asserts that the specified object is not nil. +// +// a.NotNil(err) +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotNil(a.t, object, msgAndArgs...) +} + +// NotNilf asserts that the specified object is not nil. +// +// a.NotNilf(err, "error message %s", "formatted") +func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotNilf(a.t, object, msg, args...) +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ RemainCalm() }) +func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotPanics(a.t, f, msgAndArgs...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotPanicsf(a.t, f, msg, args...) +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotRegexp(a.t, rx, str, msgAndArgs...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotRegexpf(a.t, rx, str, msg, args...) +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotSubset(a.t, list, subset, msgAndArgs...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). 
+// +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotSubsetf(a.t, list, subset, msg, args...) +} + +// NotZero asserts that i is not the zero value for its type. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotZero(a.t, i, msgAndArgs...) +} + +// NotZerof asserts that i is not the zero value for its type. +func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotZerof(a.t, i, msg, args...) +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ GoCrazy() }) +func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Panics(a.t, f, msgAndArgs...) +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return PanicsWithValue(a.t, expected, f, msgAndArgs...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return PanicsWithValuef(a.t, expected, f, msg, args...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Panicsf(a.t, f, msg, args...) +} + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Regexp(a.t, rx, str, msgAndArgs...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Regexpf(a.t, rx, str, msg, args...) +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). 
+// +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Subset(a.t, list, subset, msgAndArgs...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Subsetf(a.t, list, subset, msg, args...) +} + +// True asserts that the specified value is true. +// +// a.True(myBool) +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return True(a.t, value, msgAndArgs...) +} + +// Truef asserts that the specified value is true. +// +// a.Truef(myBool, "error message %s", "formatted") +func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Truef(a.t, value, msg, args...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinDurationf(a.t, expected, actual, delta, msg, args...) +} + +// Zero asserts that i is the zero value for its type. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Zero(a.t, i, msgAndArgs...) +} + +// Zerof asserts that i is the zero value for its type. +func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Zerof(a.t, i, msg, args...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go new file mode 100644 index 000000000..9bd4a80e4 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -0,0 +1,1416 @@ +package assert + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "os" + "reflect" + "regexp" + "runtime" + "strings" + "time" + "unicode" + "unicode/utf8" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful +// for table driven tests. 
+type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool + +// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful +// for table driven tests. +type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool + +// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful +// for table driven tests. +type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool + +// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful +// for table driven tests. +type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool + +// Comparison a custom function that returns true on success and false on failure +type Comparison func() (success bool) + +/* + Helper functions +*/ + +// ObjectsAreEqual determines if two objects are considered equal. +// +// This function does no assertion of any kind. +func ObjectsAreEqual(expected, actual interface{}) bool { + if expected == nil || actual == nil { + return expected == actual + } + + exp, ok := expected.([]byte) + if !ok { + return reflect.DeepEqual(expected, actual) + } + + act, ok := actual.([]byte) + if !ok { + return false + } + if exp == nil || act == nil { + return exp == nil && act == nil + } + return bytes.Equal(exp, act) +} + +// ObjectsAreEqualValues gets whether two objects are equal, or if their +// values are equal. +func ObjectsAreEqualValues(expected, actual interface{}) bool { + if ObjectsAreEqual(expected, actual) { + return true + } + + actualType := reflect.TypeOf(actual) + if actualType == nil { + return false + } + expectedValue := reflect.ValueOf(expected) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + } + + return false +} + +/* CallerInfo is necessary because the assert functions use the testing object +internally, causing it to print the file:line of the assert method, rather than where +the problem actually occurred in calling code.*/ + +// CallerInfo returns an array of strings containing the file and line number +// of each stack frame leading from the current test to the assert call that +// failed. +func CallerInfo() []string { + + pc := uintptr(0) + file := "" + line := 0 + ok := false + name := "" + + callers := []string{} + for i := 0; ; i++ { + pc, file, line, ok = runtime.Caller(i) + if !ok { + // The breaks below failed to terminate the loop, and we ran off the + // end of the call stack. + break + } + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break + } + + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. 
+ if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + if len(parts) > 1 { + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + segments := strings.Split(name, ".") + name = segments[len(segments)-1] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + } + + return callers +} + +// Stolen from the `go test` tool. +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} + +func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + msg := msgAndArgs[0] + if msgAsStr, ok := msg.(string); ok { + return msgAsStr + } + return fmt.Sprintf("%+v", msg) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) + } + return "" +} + +// Aligns the provided message so that all lines after the first line start at the same location as the first line. +// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). +// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// basis on which the alignment occurs). +func indentMessageLines(message string, longestLabelLen int) string { + outBuf := new(bytes.Buffer) + + for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { + // no need to align first line because it starts at the correct location (after the label) + if i != 0 { + // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab + outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") + } + outBuf.WriteString(scanner.Text()) + } + + return outBuf.String() +} + +type failNower interface { + FailNow() +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + Fail(t, failureMessage, msgAndArgs...) + + // We cannot extend TestingT with FailNow() and + // maintain backwards compatibility, so we fallback + // to panicking when FailNow is not available in + // TestingT. + // See issue #263 + + if t, ok := t.(failNower); ok { + t.FailNow() + } else { + panic("test failed and t is missing `FailNow()`") + } + return false +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + content := []labeledContent{ + {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")}, + {"Error", failureMessage}, + } + + // Add test name if the Go version supports it + if n, ok := t.(interface { + Name() string + }); ok { + content = append(content, labeledContent{"Test", n.Name()}) + } + + message := messageFromMsgAndArgs(msgAndArgs...) 
+ if len(message) > 0 { + content = append(content, labeledContent{"Messages", message}) + } + + t.Errorf("\n%s", ""+labeledOutput(content...)) + + return false +} + +type labeledContent struct { + label string + content string +} + +// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: +// +// \t{{label}}:{{align_spaces}}\t{{content}}\n +// +// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. +// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this +// alignment is achieved, "\t{{content}}\n" is added for the output. +// +// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. +func labeledOutput(content ...labeledContent) string { + longestLabel := 0 + for _, v := range content { + if len(v.label) > longestLabel { + longestLabel = len(v.label) + } + } + var output string + for _, v := range content { + output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" + } + return output +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) + } + if !reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) + } + + return true +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { + return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) + } + + return true +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", + expected, actual, err), msgAndArgs...) + } + + if !ObjectsAreEqual(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// formatUnequalValues takes two values of arbitrary types and returns string +// representations appropriate to be presented to the user. 
+// +// If the values are not of like type, the returned strings will be prefixed +// with the type name, and the value will be enclosed in parenthesis similar +// to a type conversion in the Go grammar. +func formatUnequalValues(expected, actual interface{}) (e string, a string) { + if reflect.TypeOf(expected) != reflect.TypeOf(actual) { + return fmt.Sprintf("%T(%#v)", expected, expected), + fmt.Sprintf("%T(%#v)", actual, actual) + } + + return fmt.Sprintf("%#v", expected), + fmt.Sprintf("%#v", actual) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123)) +func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if !ObjectsAreEqualValues(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// Exactly asserts that two objects are equal in value and type. +// +// assert.Exactly(t, int32(123), int64(123)) +func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType { + return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) + } + + return Equal(t, expected, actual, msgAndArgs...) + +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err) +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !isNil(object) { + return true + } + return Fail(t, "Expected value not to be nil.", msgAndArgs...) +} + +// containsKind checks if a specified kind in the slice of kinds. +func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { + for i := 0; i < len(kinds); i++ { + if kind == kinds[i] { + return true + } + } + + return false +} + +// isNil checks if a specified object is nil or not, without Failing. +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + isNilableKind := containsKind( + []reflect.Kind{ + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice}, + kind) + + if isNilableKind && value.IsNil() { + return true + } + + return false +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err) +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isNil(object) { + return true + } + return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) +} + +// isEmpty gets whether the specified object is considered empty or not. 
+func isEmpty(object interface{}) bool { + + // get nil case out of the way + if object == nil { + return true + } + + objValue := reflect.ValueOf(object) + + switch objValue.Kind() { + // collection types are empty when they have no element + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + return objValue.Len() == 0 + // pointers are empty if nil or if the value they point to is empty + case reflect.Ptr: + if objValue.IsNil() { + return true + } + deref := objValue.Elem().Interface() + return isEmpty(deref) + // for all other types, compare against the zero value + default: + zero := reflect.Zero(objValue.Type()) + return reflect.DeepEqual(object, zero.Interface()) + } +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + pass := isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + pass := !isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// getLen try to get length of object. +// return (false, 0) if impossible. +func getLen(x interface{}) (ok bool, length int) { + v := reflect.ValueOf(x) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + return true, v.Len() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3) +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + ok, l := getLen(object) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + } + + if l != length { + return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + } + return true +} + +// True asserts that the specified value is true. +// +// assert.True(t, myBool) +func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if h, ok := t.(interface { + Helper() + }); ok { + h.Helper() + } + + if value != true { + return Fail(t, "Should be true", msgAndArgs...) + } + + return true + +} + +// False asserts that the specified value is false. +// +// assert.False(t, myBool) +func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if value != false { + return Fail(t, "Should be false", msgAndArgs...) + } + + return true + +} + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(t, obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). 
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", + expected, actual, err), msgAndArgs...) + } + + if ObjectsAreEqual(expected, actual) { + return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) + } + + return true + +} + +// containsElement try loop over the list check if the list includes the element. +// return (false, false) if impossible. +// return (true, false) if element was not found. +// return (true, true) if element was found. +func includeElement(list interface{}, element interface{}) (ok, found bool) { + + listValue := reflect.ValueOf(list) + elementValue := reflect.ValueOf(element) + defer func() { + if e := recover(); e != nil { + ok = false + found = false + } + }() + + if reflect.TypeOf(list).Kind() == reflect.String { + return true, strings.Contains(listValue.String(), elementValue.String()) + } + + if reflect.TypeOf(list).Kind() == reflect.Map { + mapKeys := listValue.MapKeys() + for i := 0; i < len(mapKeys); i++ { + if ObjectsAreEqual(mapKeys[i].Interface(), element) { + return true, true + } + } + return true, false + } + + for i := 0; i < listValue.Len(); i++ { + if ObjectsAreEqual(listValue.Index(i).Interface(), element) { + return true, true + } + } + return true, false + +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") +func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") +func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if found { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). 
+// +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if subset == nil { + return true // we consider nil to be equal to the nil set + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) + } + } + + return true +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if subset == nil { + return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return true + } + + aKind := reflect.TypeOf(listA).Kind() + bKind := reflect.TypeOf(listB).Kind() + + if aKind != reflect.Array && aKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) + } + + if bKind != reflect.Array && bKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) 
+ } + + aValue := reflect.ValueOf(listA) + bValue := reflect.ValueOf(listB) + + aLen := aValue.Len() + bLen := bValue.Len() + + if aLen != bLen { + return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) + } + + // Mark indexes in bValue that we already used + visited := make([]bool, bLen) + for i := 0; i < aLen; i++ { + element := aValue.Index(i).Interface() + found := false + for j := 0; j < bLen; j++ { + if visited[j] { + continue + } + if ObjectsAreEqual(bValue.Index(j).Interface(), element) { + visited[j] = true + found = true + break + } + } + if !found { + return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) + } + } + + return true +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ GoCrazy() }) +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) + } + + return true +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + funcDidPanic, panicValue := didPanic(f) + if !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) + } + if panicValue != expected { + return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ RemainCalm() }) +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. 
+// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + case time.Duration: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + } + + if math.IsNaN(af) { + return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + } + + if math.IsNaN(bf) { + return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) + if !result { + return result + } + } + + return true +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Map || + reflect.TypeOf(expected).Kind() != reflect.Map { + return Fail(t, "Arguments must be maps", msgAndArgs...) + } + + expectedMap := reflect.ValueOf(expected) + actualMap := reflect.ValueOf(actual) + + if expectedMap.Len() != actualMap.Len() { + return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) 
+ } + + for _, k := range expectedMap.MapKeys() { + ev := expectedMap.MapIndex(k) + av := actualMap.MapIndex(k) + + if !ev.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) + } + + if !av.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) + } + + if !InDelta( + t, + ev.Interface(), + av.Interface(), + delta, + msgAndArgs..., + ) { + return false + } + } + + return true +} + +func calcRelativeError(expected, actual interface{}) (float64, error) { + af, aok := toFloat(expected) + if !aok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + } + if af == 0 { + return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") + } + bf, bok := toFloat(actual) + if !bok { + return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) + } + + return math.Abs(af-bf) / math.Abs(af), nil +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + actualEpsilon, err := calcRelativeError(expected, actual) + if err != nil { + return Fail(t, err.Error(), msgAndArgs...) + } + if actualEpsilon > epsilon { + return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ + " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) + } + + return true +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) + if !result { + return result + } + } + + return true +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err != nil { + return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) + } + + return true +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } +func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if err == nil { + return Fail(t, "An error is expected but got nil.", msgAndArgs...) + } + + return true +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. 
+// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + expected := errString + actual := theError.Error() + // don't need to use deep equals here, we know they are both strings + if expected != actual { + return Fail(t, fmt.Sprintf("Error message not equal:\n"+ + "expected: %q\n"+ + "actual : %q", expected, actual), msgAndArgs...) + } + return true +} + +// matchRegexp return true if a specified regexp matches a string. +func matchRegexp(rx interface{}, str interface{}) bool { + + var r *regexp.Regexp + if rr, ok := rx.(*regexp.Regexp); ok { + r = rr + } else { + r = regexp.MustCompile(fmt.Sprint(rx)) + } + + return (r.FindStringIndex(fmt.Sprint(str)) != nil) + +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + match := matchRegexp(rx, str) + + if !match { + Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) + } + + return match +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + match := matchRegexp(rx, str) + + if match { + Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) + } + + return !match + +} + +// Zero asserts that i is the zero value for its type. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// NotZero asserts that i is not the zero value for its type. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) + } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) + } + return true +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. 
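The collection, panic, numeric-tolerance, error and regexp helpers above compose in the same way. A compact, hypothetical sketch with illustrative values only:

package example_test

import (
	"errors"
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestAssortedAssertions(t *testing.T) {
	// collections
	assert.Contains(t, "Hello World", "World")
	assert.Contains(t, map[string]int{"x": 1}, "x") // maps are matched on keys
	assert.Subset(t, []int{1, 2, 3}, []int{1, 2})
	assert.ElementsMatch(t, []int{1, 3, 2, 3}, []int{1, 3, 3, 2})

	// panics
	assert.Panics(t, func() { panic("boom") })
	assert.PanicsWithValue(t, "boom", func() { panic("boom") })
	assert.NotPanics(t, func() {})

	// numeric tolerance
	assert.InDelta(t, math.Pi, 22.0/7.0, 0.01)
	assert.InEpsilon(t, 100.0, 101.0, 0.02) // relative error here is 0.01

	// time and errors
	now := time.Now()
	assert.WithinDuration(t, now, now.Add(30*time.Millisecond), time.Second)
	assert.NoError(t, nil)
	assert.EqualError(t, errors.New("boom"), "boom")

	// regexp and zero values
	assert.Regexp(t, `^v\d+\.\d+\.\d+$`, "v1.4.0")
	assert.Zero(t, 0)
	assert.NotZero(t, now)
}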
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) + } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if !info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) + } + return true +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + var expectedJSONAsInterface, actualJSONAsInterface interface{} + + if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice, array or string. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { + return "" + } + + var e, a string + if et != reflect.TypeOf("") { + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) + } else { + e = expected.(string) + a = actual.(string) + } + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return "\n\nDiff:\n" + diff +} + +// validateEqualArgs checks whether provided arguments can be safely used in the +// Equal/NotEqual functions. +func validateEqualArgs(expected, actual interface{}) error { + if isFunction(expected) || isFunction(actual) { + return errors.New("cannot take func type as argument") + } + return nil +} + +func isFunction(arg interface{}) bool { + if arg == nil { + return false + } + return reflect.TypeOf(arg).Kind() == reflect.Func +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} + +type tHelper interface { + Helper() +} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 000000000..c9dccc4d6 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,45 @@ +// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
+// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the format below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. +// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 000000000..ac9dc9d1d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. If the code does not care +// about error specifics, and only needs to return the error for example, this +// error should be used to make the test code more readable. +var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go new file mode 100644 index 000000000..9ad56851d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -0,0 +1,16 @@ +package assert + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 000000000..df46fa777 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 +1,143 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 and +// an error if building a new request fails. +func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url, nil) + if err != nil { + return -1, err + } + req.URL.RawQuery = values.Encode() + handler(w, req) + return w.Code, nil +} + +// HTTPSuccess asserts that a specified handler returns a success status code. 
+// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent + if !isSuccessCode { + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isSuccessCode +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect + if !isRedirectCode { + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isRedirectCode +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isErrorCode := code >= http.StatusBadRequest + if !isErrorCode { + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isErrorCode +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. +func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return "" + } + handler(w, req) + return w.Body.String() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). 
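The HTTP helpers exercise an http.HandlerFunc directly through httptest, so no server needs to be started. A hypothetical sketch (handler and route names are illustrative):

package example_test

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestHealthEndpoint(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	}

	assert.HTTPSuccess(t, handler, "GET", "/healthz", nil)
	assert.HTTPBodyContains(t, handler, "GET", "/healthz", nil, "ok")
	assert.HTTPBodyNotContains(t, handler, "GET", "/healthz", nil, "error")
}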
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if !contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return contains +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return !contains +} diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go new file mode 100644 index 000000000..7324128ef --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/doc.go @@ -0,0 +1,44 @@ +// Package mock provides a system by which it is possible to mock your objects +// and verify calls are happening as expected. +// +// Example Usage +// +// The mock package provides an object, Mock, that tracks activity on another object. It is usually +// embedded into a test object as shown below: +// +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock +// +// // other fields go here as normal +// } +// +// When implementing the methods of an interface, you wire your functions up +// to call the Mock.Called(args...) method, and return the appropriate values. +// +// For example, to mock a method that saves the name and age of a person and returns +// the year of their birth or an error, you might write this: +// +// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +// args := o.Called(firstname, lastname, age) +// return args.Int(0), args.Error(1) +// } +// +// The Int, Error and Bool methods are examples of strongly typed getters that take the argument +// index position. Given this argument list: +// +// (12, true, "Something") +// +// You could read them out strongly typed like this: +// +// args.Int(0) +// args.Bool(1) +// args.String(2) +// +// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: +// +// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) +// +// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those +// cases you should check for nil first. 
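Putting the wiring described above together, a self-contained, hypothetical test might look like this (the Notifier interface and MockNotifier type are invented for illustration, not part of this patch):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// Notifier is the production interface being mocked.
type Notifier interface {
	Notify(msg string) error
}

// MockNotifier embeds mock.Mock and forwards calls through Called.
type MockNotifier struct {
	mock.Mock
}

func (m *MockNotifier) Notify(msg string) error {
	args := m.Called(msg)
	return args.Error(0)
}

func TestNotify(t *testing.T) {
	n := new(MockNotifier)
	n.On("Notify", "hello").Return(nil).Once()

	assert.NoError(t, n.Notify("hello"))
	n.AssertExpectations(t)
}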
+package mock diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go new file mode 100644 index 000000000..d6694ed78 --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -0,0 +1,886 @@ +package mock + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() +} + +/* + Call +*/ + +// Call represents a method call and is used for setting expectations, +// as well as recording activity. +type Call struct { + Parent *Mock + + // The name of the method that was or will be called. + Method string + + // Holds the arguments of the method. + Arguments Arguments + + // Holds the arguments that should be returned when + // this method is called. + ReturnArguments Arguments + + // Holds the caller info for the On() call + callerInfo []string + + // The number of times to return the return arguments when setting + // expectations. 0 means to always return the value. + Repeatability int + + // Amount of times this call has been called + totalCalls int + + // Call to this method can be optional + optional bool + + // Holds a channel that will be used to block the Return until it either + // receives a message or is closed. nil means it returns immediately. + WaitFor <-chan time.Time + + waitTime time.Duration + + // Holds a handler used to manipulate arguments content that are passed by + // reference. It's useful when mocking methods such as unmarshalers or + // decoders. + RunFn func(Arguments) +} + +func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { + return &Call{ + Parent: parent, + Method: methodName, + Arguments: methodArguments, + ReturnArguments: make([]interface{}, 0), + callerInfo: callerInfo, + Repeatability: 0, + WaitFor: nil, + RunFn: nil, + } +} + +func (c *Call) lock() { + c.Parent.mutex.Lock() +} + +func (c *Call) unlock() { + c.Parent.mutex.Unlock() +} + +// Return specifies the return arguments for the expectation. +// +// Mock.On("DoSomething").Return(errors.New("failed")) +func (c *Call) Return(returnArguments ...interface{}) *Call { + c.lock() + defer c.unlock() + + c.ReturnArguments = returnArguments + + return c +} + +// Once indicates that that the mock should only return the value once. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() +func (c *Call) Once() *Call { + return c.Times(1) +} + +// Twice indicates that that the mock should only return the value twice. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() +func (c *Call) Twice() *Call { + return c.Times(2) +} + +// Times indicates that that the mock should only return the indicated number +// of times. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) +func (c *Call) Times(i int) *Call { + c.lock() + defer c.unlock() + c.Repeatability = i + return c +} + +// WaitUntil sets the channel that will block the mock's return until its closed +// or a message is received. 
+// +// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) +func (c *Call) WaitUntil(w <-chan time.Time) *Call { + c.lock() + defer c.unlock() + c.WaitFor = w + return c +} + +// After sets how long to block until the call returns +// +// Mock.On("MyMethod", arg1, arg2).After(time.Second) +func (c *Call) After(d time.Duration) *Call { + c.lock() + defer c.unlock() + c.waitTime = d + return c +} + +// Run sets a handler to be called before returning. It can be used when +// mocking a method such as unmarshalers that takes a pointer to a struct and +// sets properties in such struct +// +// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(func(args Arguments) { +// arg := args.Get(0).(*map[string]interface{}) +// arg["foo"] = "bar" +// }) +func (c *Call) Run(fn func(args Arguments)) *Call { + c.lock() + defer c.unlock() + c.RunFn = fn + return c +} + +// Maybe allows the method call to be optional. Not calling an optional method +// will not cause an error while asserting expectations +func (c *Call) Maybe() *Call { + c.lock() + defer c.unlock() + c.optional = true + return c +} + +// On chains a new expectation description onto the mocked interface. This +// allows syntax like. +// +// Mock. +// On("MyMethod", 1).Return(nil). +// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) +//go:noinline +func (c *Call) On(methodName string, arguments ...interface{}) *Call { + return c.Parent.On(methodName, arguments...) +} + +// Mock is the workhorse used to track activity on another object. +// For an example of its usage, refer to the "Example Usage" section at the top +// of this document. +type Mock struct { + // Represents the calls that are expected of + // an object. + ExpectedCalls []*Call + + // Holds the calls that were made to this mocked object. + Calls []Call + + // test is An optional variable that holds the test struct, to be used when an + // invalid mock call was made. + test TestingT + + // TestData holds any data that might be useful for testing. Testify ignores + // this data completely allowing you to do whatever you like with it. + testData objx.Map + + mutex sync.Mutex +} + +// TestData holds any data that might be useful for testing. Testify ignores +// this data completely allowing you to do whatever you like with it. +func (m *Mock) TestData() objx.Map { + + if m.testData == nil { + m.testData = make(objx.Map) + } + + return m.testData +} + +/* + Setting expectations +*/ + +// Test sets the test struct variable of the mock object +func (m *Mock) Test(t TestingT) { + m.mutex.Lock() + defer m.mutex.Unlock() + m.test = t +} + +// fail fails the current test with the given formatted format and args. +// In case that a test was defined, it uses the test APIs for failing a test, +// otherwise it uses panic. +func (m *Mock) fail(format string, args ...interface{}) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.test == nil { + panic(fmt.Sprintf(format, args...)) + } + m.test.Errorf(format, args...) + m.test.FailNow() +} + +// On starts a description of an expectation of the specified method +// being called. +// +// Mock.On("MyMethod", arg1, arg2) +func (m *Mock) On(methodName string, arguments ...interface{}) *Call { + for _, arg := range arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + m.mutex.Lock() + defer m.mutex.Unlock() + c := newCall(m, methodName, assert.CallerInfo(), arguments...) 
+ m.ExpectedCalls = append(m.ExpectedCalls, c) + return c +} + +// /* +// Recording and responding to activity +// */ + +func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { + for i, call := range m.ExpectedCalls { + if call.Method == method && call.Repeatability > -1 { + + _, diffCount := call.Arguments.Diff(arguments) + if diffCount == 0 { + return i, call + } + + } + } + return -1, nil +} + +func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) { + var diffCount int + var closestCall *Call + var err string + + for _, call := range m.expectedCalls() { + if call.Method == method { + + errInfo, tempDiffCount := call.Arguments.Diff(arguments) + if tempDiffCount < diffCount || diffCount == 0 { + diffCount = tempDiffCount + closestCall = call + err = errInfo + } + + } + } + + return closestCall, err +} + +func callString(method string, arguments Arguments, includeArgumentValues bool) string { + + var argValsString string + if includeArgumentValues { + var argVals []string + for argIndex, arg := range arguments { + argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) + } + argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) + } + + return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) +} + +// Called tells the mock object that a method has been called, and gets an array +// of arguments to return. Panics if the call is unexpected (i.e. not preceded by +// appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) Called(arguments ...interface{}) Arguments { + // get the calling function's name + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Couldn't get the caller information") + } + functionPath := runtime.FuncForPC(pc).Name() + //Next four lines are required to use GCCGO function naming conventions. + //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock + //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree + //With GCCGO we need to remove interface information starting from pN
. + re := regexp.MustCompile("\\.pN\\d+_") + if re.MatchString(functionPath) { + functionPath = re.Split(functionPath, -1)[0] + } + parts := strings.Split(functionPath, ".") + functionName := parts[len(parts)-1] + return m.MethodCalled(functionName, arguments...) +} + +// MethodCalled tells the mock object that the given method has been called, and gets +// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded +// by appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { + m.mutex.Lock() + //TODO: could combine expected and closes in single loop + found, call := m.findExpectedCall(methodName, arguments...) + + if found < 0 { + // we have to fail here - because we don't know what to do + // as the return arguments. This is because: + // + // a) this is a totally unexpected call to this method, + // b) the arguments are not what was expected, or + // c) the developer has forgotten to add an accompanying On...Return pair. + + closestCall, mismatch := m.findClosestCall(methodName, arguments...) + m.mutex.Unlock() + + if closestCall != nil { + m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s", + callString(methodName, arguments, true), + callString(methodName, closestCall.Arguments, true), + diffArguments(closestCall.Arguments, arguments), + strings.TrimSpace(mismatch), + ) + } else { + m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + } + } + + if call.Repeatability == 1 { + call.Repeatability = -1 + } else if call.Repeatability > 1 { + call.Repeatability-- + } + call.totalCalls++ + + // add the call + m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...)) + m.mutex.Unlock() + + // block if specified + if call.WaitFor != nil { + <-call.WaitFor + } else { + time.Sleep(call.waitTime) + } + + m.mutex.Lock() + runFn := call.RunFn + m.mutex.Unlock() + + if runFn != nil { + runFn(arguments) + } + + m.mutex.Lock() + returnArgs := call.ReturnArguments + m.mutex.Unlock() + + return returnArgs +} + +/* + Assertions +*/ + +type assertExpectationser interface { + AssertExpectations(TestingT) bool +} + +// AssertExpectationsForObjects asserts that everything specified with On and Return +// of the specified objects was in fact called as expected. +// +// Calls may have occurred in any order. +func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + for _, obj := range testObjects { + if m, ok := obj.(Mock); ok { + t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") + obj = &m + } + m := obj.(assertExpectationser) + if !m.AssertExpectations(t) { + t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) + return false + } + } + return true +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact called as expected. Calls may have occurred in any order. 
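With several mocks in play, their expectations can be verified in one call. Continuing the hypothetical MockNotifier sketch from above:

func TestFanOut(t *testing.T) {
	primary := new(MockNotifier)
	backup := new(MockNotifier)
	primary.On("Notify", "deploy finished").Return(nil)
	backup.On("Notify", "deploy finished").Return(nil)

	assert.NoError(t, primary.Notify("deploy finished"))
	assert.NoError(t, backup.Notify("deploy finished"))

	mock.AssertExpectationsForObjects(t, primary, backup)
	primary.AssertNumberOfCalls(t, "Notify", 1)
}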
+func (m *Mock) AssertExpectations(t TestingT) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + var somethingMissing bool + var failedExpectations int + + // iterate through each expectation + expectedCalls := m.expectedCalls() + for _, expectedCall := range expectedCalls { + if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { + somethingMissing = true + failedExpectations++ + t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) + } else { + if expectedCall.Repeatability > 0 { + somethingMissing = true + failedExpectations++ + t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) + } else { + t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } + } + } + + if somethingMissing { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) + } + + return !somethingMissing +} + +// AssertNumberOfCalls asserts that the method was called expectedCalls times. +func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + var actualCalls int + for _, call := range m.calls() { + if call.Method == methodName { + actualCalls++ + } + } + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) +} + +// AssertCalled asserts that the method was called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + if !m.methodWasCalled(methodName, arguments) { + var calledWithArgs []string + for _, call := range m.calls() { + calledWithArgs = append(calledWithArgs, fmt.Sprintf("%v", call.Arguments)) + } + if len(calledWithArgs) == 0 { + return assert.Fail(t, "Should have called with given arguments", + fmt.Sprintf("Expected %q to have been called with:\n%v\nbut no actual calls happened", methodName, arguments)) + } + return assert.Fail(t, "Should have called with given arguments", + fmt.Sprintf("Expected %q to have been called with:\n%v\nbut actual calls were:\n %v", methodName, arguments, strings.Join(calledWithArgs, "\n"))) + } + return true +} + +// AssertNotCalled asserts that the method was not called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. 
+func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + if m.methodWasCalled(methodName, arguments) { + return assert.Fail(t, "Should not have called with given arguments", + fmt.Sprintf("Expected %q to not have been called with:\n%v\nbut actually it was.", methodName, arguments)) + } + return true +} + +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { + for _, call := range m.calls() { + if call.Method == methodName { + + _, differences := Arguments(expected).Diff(call.Arguments) + + if differences == 0 { + // found the expected call + return true + } + + } + } + // we didn't find the expected call + return false +} + +func (m *Mock) expectedCalls() []*Call { + return append([]*Call{}, m.ExpectedCalls...) +} + +func (m *Mock) calls() []Call { + return append([]Call{}, m.Calls...) +} + +/* + Arguments +*/ + +// Arguments holds an array of method arguments or return values. +type Arguments []interface{} + +const ( + // Anything is used in Diff and Assert when the argument being tested + // shouldn't be taken into consideration. + Anything = "mock.Anything" +) + +// AnythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. +type AnythingOfTypeArgument string + +// AnythingOfType returns an AnythingOfTypeArgument object containing the +// name of the type to check for. Used in Diff and Assert. +// +// For example: +// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +func AnythingOfType(t string) AnythingOfTypeArgument { + return AnythingOfTypeArgument(t) +} + +// argumentMatcher performs custom argument matching, returning whether or +// not the argument is matched by the expectation fixture function. +type argumentMatcher struct { + // fn is a function which accepts one argument, and returns a bool. + fn reflect.Value +} + +func (f argumentMatcher) Matches(argument interface{}) bool { + expectType := f.fn.Type().In(0) + expectTypeNilSupported := false + switch expectType.Kind() { + case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr: + expectTypeNilSupported = true + } + + argType := reflect.TypeOf(argument) + var arg reflect.Value + if argType == nil { + arg = reflect.New(expectType).Elem() + } else { + arg = reflect.ValueOf(argument) + } + + if argType == nil && !expectTypeNilSupported { + panic(errors.New("attempting to call matcher with nil for non-nil expected type")) + } + if argType == nil || argType.AssignableTo(expectType) { + result := f.fn.Call([]reflect.Value{arg}) + return result[0].Bool() + } + return false +} + +func (f argumentMatcher) String() string { + return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) +} + +// MatchedBy can be used to match a mock call based on only certain properties +// from a complex struct or some calculation. It takes a function that will be +// evaluated with the called argument and will return true when there's a match +// and false otherwise. +// +// Example: +// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) +// +// |fn|, must be a function accepting a single argument (of the expected type) +// which returns a bool. If |fn| doesn't match the required signature, +// MatchedBy() panics. 
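AnythingOfType and MatchedBy let an expectation accept a family of arguments rather than exact values. A hypothetical, self-contained sketch:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

type MockStore struct {
	mock.Mock
}

func (m *MockStore) Save(key string, size int) error {
	return m.Called(key, size).Error(0)
}

func TestSaveArgumentMatching(t *testing.T) {
	s := new(MockStore)

	// Accept any string key, but only positive sizes.
	s.On("Save",
		mock.AnythingOfType("string"),
		mock.MatchedBy(func(n int) bool { return n > 0 }),
	).Return(nil)

	assert.NoError(t, s.Save("blob", 42))
	s.AssertExpectations(t)
}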
+func MatchedBy(fn interface{}) argumentMatcher { + fnType := reflect.TypeOf(fn) + + if fnType.Kind() != reflect.Func { + panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) + } + if fnType.NumIn() != 1 { + panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) + } + if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { + panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) + } + + return argumentMatcher{fn: reflect.ValueOf(fn)} +} + +// Get Returns the argument at the specified index. +func (args Arguments) Get(index int) interface{} { + if index+1 > len(args) { + panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) + } + return args[index] +} + +// Is gets whether the objects match the arguments specified. +func (args Arguments) Is(objects ...interface{}) bool { + for i, obj := range args { + if obj != objects[i] { + return false + } + } + return true +} + +// Diff gets a string describing the differences between the arguments +// and the specified objects. +// +// Returns the diff string and number of differences found. +func (args Arguments) Diff(objects []interface{}) (string, int) { + //TODO: could return string as error and nil for No difference + + var output = "\n" + var differences int + + var maxArgCount = len(args) + if len(objects) > maxArgCount { + maxArgCount = len(objects) + } + + for i := 0; i < maxArgCount; i++ { + var actual, expected interface{} + var actualFmt, expectedFmt string + + if len(objects) <= i { + actual = "(Missing)" + actualFmt = "(Missing)" + } else { + actual = objects[i] + actualFmt = fmt.Sprintf("(%[1]T=%[1]v)", actual) + } + + if len(args) <= i { + expected = "(Missing)" + expectedFmt = "(Missing)" + } else { + expected = args[i] + expectedFmt = fmt.Sprintf("(%[1]T=%[1]v)", expected) + } + + if matcher, ok := expected.(argumentMatcher); ok { + if matcher.Matches(actual) { + output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher) + } else { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) + } + } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { + + // type checking + if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) + } + + } else { + + // normal checking + + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) + } + } + + } + + if differences == 0 { + return "No differences.", differences + } + + return output, differences + +} + +// Assert compares the arguments with the specified objects and fails if +// they do not exactly match. +func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + // get the differences + diff, diffCount := args.Diff(objects) + + if diffCount == 0 { + return true + } + + // there are differences... report them... 
+ t.Logf(diff) + t.Errorf("%sArguments do not match.", assert.CallerInfo()) + + return false + +} + +// String gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +// +// If no index is provided, String() returns a complete string representation +// of the arguments. +func (args Arguments) String(indexOrNil ...int) string { + + if len(indexOrNil) == 0 { + // normal String() method - return a string representation of the args + var argsStr []string + for _, arg := range args { + argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) + } + return strings.Join(argsStr, ",") + } else if len(indexOrNil) == 1 { + // Index has been specified - get the argument at that index + var index = indexOrNil[0] + var s string + var ok bool + if s, ok = args.Get(index).(string); !ok { + panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s + } + + panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) + +} + +// Int gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Int(index int) int { + var s int + var ok bool + if s, ok = args.Get(index).(int); !ok { + panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Error gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Error(index int) error { + obj := args.Get(index) + var s error + var ok bool + if obj == nil { + return nil + } + if s, ok = obj.(error); !ok { + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Bool gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Bool(index int) bool { + var s bool + var ok bool + if s, ok = args.Get(index).(bool); !ok { + panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +func diffArguments(expected Arguments, actual Arguments) string { + if len(expected) != len(actual) { + return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual)) + } + + for x := range expected { + if diffString := diff(expected[x], actual[x]); diffString != "" { + return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString) + } + } + + return "" +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. 
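The typed getters above are what a mocked method uses to unpack its configured return values. A small, hypothetical illustration of Arguments on its own:

package main

import (
	"errors"
	"fmt"

	"github.com/stretchr/testify/mock"
)

func main() {
	args := mock.Arguments{"alice", 30, errors.New("boom"), true}

	fmt.Println(args.String(0)) // "alice"
	fmt.Println(args.Int(1))    // 30
	fmt.Println(args.Error(2))  // "boom"
	fmt.Println(args.Bool(3))   // true
	fmt.Println(args.String())  // argument types, e.g. string,int,*errors.errorString,bool
}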
+func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spewConfig.Sdump(expected) + a := spewConfig.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return diff +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} + +type tHelper interface { + Helper() +} diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore deleted file mode 100644 index 0a4504f11..000000000 --- a/vendor/go.uber.org/atomic/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -.DS_Store -/vendor -/cover -cover.out -lint.log - -# Binaries -*.test - -# Profiling output -*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml deleted file mode 100644 index f1ce95afc..000000000 --- a/vendor/go.uber.org/atomic/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/atomic - -go: - - 1.5 - - 1.6 - - tip - -cache: - directories: - - vendor - -install: - - make install_ci - -script: - - make test_ci - - scripts/test-ubergo.sh - - make lint - - travis_retry goveralls -coverprofile=cover.out -service=travis-ci diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile deleted file mode 100644 index dfc63d9db..000000000 --- a/vendor/go.uber.org/atomic/Makefile +++ /dev/null @@ -1,64 +0,0 @@ -PACKAGES := $(shell glide nv) -# Many Go tools take file globs or directories as arguments instead of packages. -PACKAGE_FILES ?= *.go - - -# The linting tools evolve with each Go version, so run them only on the latest -# stable release. -GO_VERSION := $(shell go version | cut -d " " -f 3) -GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) -LINTABLE_MINOR_VERSIONS := 7 8 -ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) -SHOULD_LINT := true -endif - - -export GO15VENDOREXPERIMENT=1 - - -.PHONY: build -build: - go build -i $(PACKAGES) - - -.PHONY: install -install: - glide --version || go get github.com/Masterminds/glide - glide install - - -.PHONY: test -test: - go test -cover -race $(PACKAGES) - - -.PHONY: install_ci -install_ci: install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover -ifdef SHOULD_LINT - go get github.com/golang/lint/golint -endif - -.PHONY: lint -lint: -ifdef SHOULD_LINT - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @$(foreach dir,$(PACKAGE_FILES),go tool vet $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking lint..." - @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log - @[ ! 
-s lint.log ] -else - @echo "Skipping linters on" $(GO_VERSION) -endif - - -.PHONY: test_ci -test_ci: install_ci build - ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md deleted file mode 100644 index bb4a12c18..000000000 --- a/vendor/go.uber.org/atomic/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Simple wrappers for primitive types to enforce atomic access. - -## Installation -`go get -u go.uber.org/atomic` - -## Usage -The standard library's `sync/atomic` is powerful, but it's easy to forget which -variables must be accessed atomically. `go.uber.org/atomic` preserves all the -functionality of the standard library, but wraps the primitive types to -provide a safer, more convenient API. - -```go -var atom atomic.Uint32 -atom.Store(42) -atom.Sub(2) -atom.CAS(40, 11) -``` - -See the [documentation][doc] for a complete API specification. - -## Development Status -Stable. - -
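
For reference, this vendor bump also adds a `CAS` method on `atomic.Bool` and a new `Duration` wrapper to `atomic.go` (see the hunk just below). A minimal, hedged usage sketch, assuming only the `go.uber.org/atomic` API shown in this diff:

```go
package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

func main() {
	// Bool.CAS is added in this update: swap only if the current value matches.
	var ready atomic.Bool
	fmt.Println(ready.CAS(false, true)) // true, value is now true

	// Duration wraps an Int64 so a time.Duration can be read and updated atomically.
	timeout := atomic.NewDuration(5 * time.Second)
	timeout.Add(time.Second)
	fmt.Println(timeout.Load()) // 6s
}
```
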
-Released under the [MIT License](LICENSE.txt). - -[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg -[doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://travis-ci.org/uber-go/atomic.svg?branch=master -[ci]: https://travis-ci.org/uber-go/atomic -[cov-img]: https://coveralls.io/repos/github/uber-go/atomic/badge.svg?branch=master -[cov]: https://coveralls.io/github/uber-go/atomic?branch=master diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go index 290b88414..1db6849fc 100644 --- a/vendor/go.uber.org/atomic/atomic.go +++ b/vendor/go.uber.org/atomic/atomic.go @@ -25,6 +25,7 @@ package atomic import ( "math" "sync/atomic" + "time" ) // Int32 is an atomic wrapper around an int32. @@ -232,6 +233,11 @@ func (b *Bool) Load() bool { return truthy(atomic.LoadUint32(&b.v)) } +// CAS is an atomic compare-and-swap. +func (b *Bool) CAS(old, new bool) bool { + return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) +} + // Store atomically stores the passed value. func (b *Bool) Store(new bool) { atomic.StoreUint32(&b.v, boolToInt(new)) @@ -299,6 +305,47 @@ func (f *Float64) CAS(old, new float64) bool { return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) } +// Duration is an atomic wrapper around time.Duration +// https://godoc.org/time#Duration +type Duration struct { + v Int64 +} + +// NewDuration creates a Duration. +func NewDuration(d time.Duration) *Duration { + return &Duration{v: *NewInt64(int64(d))} +} + +// Load atomically loads the wrapped value. +func (d *Duration) Load() time.Duration { + return time.Duration(d.v.Load()) +} + +// Store atomically stores the passed value. +func (d *Duration) Store(n time.Duration) { + d.v.Store(int64(n)) +} + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// Swap atomically swaps the wrapped time.Duration and returns the old value. +func (d *Duration) Swap(n time.Duration) time.Duration { + return time.Duration(d.v.Swap(int64(n))) +} + +// CAS is an atomic compare-and-swap. 
+func (d *Duration) CAS(old, new time.Duration) bool { + return d.v.CAS(int64(old), int64(new)) +} + // Value shadows the type of the same name from sync/atomic // https://godoc.org/sync/atomic#Value type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock deleted file mode 100644 index 3c72c5997..000000000 --- a/vendor/go.uber.org/atomic/glide.lock +++ /dev/null @@ -1,17 +0,0 @@ -hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53 -updated: 2016-10-27T00:10:51.16960137-07:00 -imports: [] -testImports: -- name: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: d77da356e56a7428ad25149ca77381849a6a5232 - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml deleted file mode 100644 index 4cf608ec0..000000000 --- a/vendor/go.uber.org/atomic/glide.yaml +++ /dev/null @@ -1,6 +0,0 @@ -package: go.uber.org/atomic -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go index acf0f5bb1..ede8136fa 100644 --- a/vendor/go.uber.org/atomic/string.go +++ b/vendor/go.uber.org/atomic/string.go @@ -20,10 +20,8 @@ package atomic -import "sync/atomic" - -// String is an atomic type-safe wrapper around atomic.Value for strings. -type String struct{ v atomic.Value } +// String is an atomic type-safe wrapper around Value for strings. +type String struct{ v Value } // NewString creates a String. func NewString(str string) *String { @@ -44,7 +42,7 @@ func (s *String) Load() string { } // Store atomically stores the passed string. -// Note: Converting the string to an interface{} to store in the atomic.Value +// Note: Converting the string to an interface{} to store in the Value // requires an allocation. 
func (s *String) Store(str string) { s.v.Store(str) diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml deleted file mode 100644 index 6d4d1be7b..000000000 --- a/vendor/go.uber.org/multierr/.codecov.yml +++ /dev/null @@ -1,15 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore deleted file mode 100644 index 61ead8666..000000000 --- a/vendor/go.uber.org/multierr/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/vendor diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml deleted file mode 100644 index 5ffa8fed4..000000000 --- a/vendor/go.uber.org/multierr/.travis.yml +++ /dev/null @@ -1,33 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/multierr - -env: - global: - - GO15VENDOREXPERIMENT=1 - -go: - - 1.7 - - 1.8 - - tip - -cache: - directories: - - vendor - -before_install: -- go version - -install: -- | - set -e - make install_ci - -script: -- | - set -e - make lint - make test_ci - -after_success: -- bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md deleted file mode 100644 index 898445d06..000000000 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ /dev/null @@ -1,28 +0,0 @@ -Releases -======== - -v1.1.0 (2017-06-30) -=================== - -- Added an `Errors(error) []error` function to extract the underlying list of - errors for a multierr error. - - -v1.0.0 (2017-05-31) -=================== - -No changes since v0.2.0. This release is committing to making no breaking -changes to the current API in the 1.X series. - - -v0.2.0 (2017-04-11) -=================== - -- Repeatedly appending to the same error is now faster due to fewer - allocations. - - -v0.1.0 (2017-31-03) -=================== - -- Initial release diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile deleted file mode 100644 index a7437d061..000000000 --- a/vendor/go.uber.org/multierr/Makefile +++ /dev/null @@ -1,74 +0,0 @@ -export GO15VENDOREXPERIMENT=1 - -PACKAGES := $(shell glide nv) - -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) - -.PHONY: install -install: - glide --version || go get github.com/Masterminds/glide - glide install - -.PHONY: build -build: - go build -i $(PACKAGES) - -.PHONY: test -test: - go test -cover -race $(PACKAGES) - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) - -.PHONY: govet -govet: - $(eval VET_LOG := $(shell mktemp -t govet.XXXXX)) - @go vet $(PACKAGES) 2>&1 \ - | grep -v '^exit status' > $(VET_LOG) || true - @[ ! 
-s "$(VET_LOG)" ] || (echo "govet failed:" | cat - $(VET_LOG) && false) - -.PHONY: golint -golint: - @go get github.com/golang/lint/golint - $(eval LINT_LOG := $(shell mktemp -t golint.XXXXX)) - @cat /dev/null > $(LINT_LOG) - @$(foreach pkg, $(PACKAGES), golint $(pkg) >> $(LINT_LOG) || true;) - @[ ! -s "$(LINT_LOG)" ] || (echo "golint failed:" | cat - $(LINT_LOG) && false) - -.PHONY: staticcheck -staticcheck: - @go get honnef.co/go/tools/cmd/staticcheck - $(eval STATICCHECK_LOG := $(shell mktemp -t staticcheck.XXXXX)) - @staticcheck $(PACKAGES) 2>&1 > $(STATICCHECK_LOG) || true - @[ ! -s "$(STATICCHECK_LOG)" ] || (echo "staticcheck failed:" | cat - $(STATICCHECK_LOG) && false) - -.PHONY: lint -lint: gofmt govet golint staticcheck - -.PHONY: cover -cover: - ./scripts/cover.sh $(shell go list $(PACKAGES)) - go tool cover -html=cover.out -o cover.html - -update-license: - @go get go.uber.org/tools/update-license - @update-license \ - $(shell go list -json $(PACKAGES) | \ - jq -r '.Dir + "/" + (.GoFiles | .[])') - -############################################################################## - -.PHONY: install_ci -install_ci: install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover - -.PHONY: test_ci -test_ci: install_ci - ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md deleted file mode 100644 index 065088f64..000000000 --- a/vendor/go.uber.org/multierr/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -`multierr` allows combining one or more Go `error`s together. - -## Installation - - go get -u go.uber.org/multierr - -## Status - -Stable: No breaking changes will be made before 2.0. - -------------------------------------------------------------------------------- - -Released under the [MIT License]. 
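
Since the `multierr` README above is removed by this diff, here is a short, hedged sketch of the API it describes (`Combine`, `Append`, and `Errors` from `go.uber.org/multierr` v1, as listed in the package's own changelog in this diff):

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	// Combine merges several errors into one; nil errors are skipped.
	err := multierr.Combine(
		errors.New("disk full"),
		nil,
		errors.New("permission denied"),
	)
	fmt.Println(err) // "disk full; permission denied"

	// Append accumulates errors one at a time, e.g. inside a loop.
	var all error
	all = multierr.Append(all, errors.New("first"))
	all = multierr.Append(all, errors.New("second"))

	// Errors recovers the underlying list of errors.
	fmt.Println(len(multierr.Errors(all))) // 2
}
```
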
- -[MIT License]: LICENSE.txt -[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg -[doc]: https://godoc.org/go.uber.org/multierr -[ci-img]: https://travis-ci.org/uber-go/multierr.svg?branch=master -[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg -[ci]: https://travis-ci.org/uber-go/multierr -[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/glide.lock b/vendor/go.uber.org/multierr/glide.lock deleted file mode 100644 index f9ea94c33..000000000 --- a/vendor/go.uber.org/multierr/glide.lock +++ /dev/null @@ -1,19 +0,0 @@ -hash: b53b5e9a84b9cb3cc4b2d0499e23da2feca1eec318ce9bb717ecf35bf24bf221 -updated: 2017-04-10T13:34:45.671678062-07:00 -imports: -- name: go.uber.org/atomic - version: 3b8db5e93c4c02efbc313e17b2e796b0914a01fb -testImports: -- name: github.com/davecgh/go-spew - version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/stretchr/testify - version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml deleted file mode 100644 index 6ef084ec2..000000000 --- a/vendor/go.uber.org/multierr/glide.yaml +++ /dev/null @@ -1,8 +0,0 @@ -package: go.uber.org/multierr -import: -- package: go.uber.org/atomic - version: ^1 -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml deleted file mode 100644 index 8e5ca7d3e..000000000 --- a/vendor/go.uber.org/zap/.codecov.yml +++ /dev/null @@ -1,17 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 95% # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure -ignore: - - internal/readme/readme.go - diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore deleted file mode 100644 index 08fbde6ce..000000000 --- a/vendor/go.uber.org/zap/.gitignore +++ /dev/null @@ -1,28 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -vendor - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -*.pprof -*.out -*.log diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl deleted file mode 100644 index c6440db8e..000000000 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ /dev/null @@ -1,108 +0,0 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Blazing fast, structured, leveled logging in Go. - -## Installation - -`go get -u go.uber.org/zap` - -Note that zap only supports the two most recent minor versions of Go. - -## Quick Start - -In contexts where performance is nice, but not critical, use the -`SugaredLogger`. 
It's 4-10x faster than other structured logging -packages and includes both structured and `printf`-style APIs. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() // flushes buffer, if any -sugar := logger.Sugar() -sugar.Infow("failed to fetch URL", - // Structured context as loosely typed key-value pairs. - "url", url, - "attempt", 3, - "backoff", time.Second, -) -sugar.Infof("Failed to fetch URL: %s", url) -``` - -When performance and type safety are critical, use the `Logger`. It's even -faster than the `SugaredLogger` and allocates far less, but it only supports -structured logging. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() -logger.Info("failed to fetch URL", - // Structured context as strongly typed Field values. - zap.String("url", url), - zap.Int("attempt", 3), - zap.Duration("backoff", time.Second), -) -``` - -See the [documentation][doc] and [FAQ](FAQ.md) for more details. - -## Performance - -For applications that log in the hot path, reflection-based serialization and -string formatting are prohibitively expensive — they're CPU-intensive -and make many small allocations. Put differently, using `encoding/json` and -`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. - -Zap takes a different approach. It includes a reflection-free, zero-allocation -JSON encoder, and the base `Logger` strives to avoid serialization overhead -and allocations wherever possible. By building the high-level `SugaredLogger` -on that foundation, zap lets users *choose* when they need to count every -allocation and when they'd prefer a more familiar, loosely typed API. - -As measured by its own [benchmarking suite][], not only is zap more performant -than comparable structured logging packages — it's also faster than the -standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) - -Log a message and 10 fields: - -{{.BenchmarkAddingFields}} - -Log a message with a logger that already has 10 fields of context: - -{{.BenchmarkAccumulatedContext}} - -Log a static string, without any context or `printf`-style templating: - -{{.BenchmarkWithoutFields}} - -## Development Status: Stable - -All APIs are finalized, and no breaking changes will be made in the 1.x series -of releases. Users of semver-aware dependency management systems should pin -zap to `^1`. - -## Contributing - -We encourage and support an active, healthy community of contributors — -including you! Details are in the [contribution guide](CONTRIBUTING.md) and -the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on -issues and pull requests, but you can also report any negative conduct to -oss-conduct@uber.com. That email list is a private, safe space; even the zap -maintainers don't have access, so don't hesitate to hold us to a high -standard. - -
- -Released under the [MIT License](LICENSE.txt). - -1 In particular, keep in mind that we may be -benchmarking against slightly older versions of other packages. Versions are -pinned in zap's [glide.lock][] file. [↩](#anchor-versions) - -[doc-img]: https://godoc.org/go.uber.org/zap?status.svg -[doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.org/uber-go/zap -[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/zap -[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml deleted file mode 100644 index a3321fa2d..000000000 --- a/vendor/go.uber.org/zap/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -sudo: false -go: - - 1.9.x - - 1.10.x -go_import_path: go.uber.org/zap -env: - global: - - TEST_TIMEOUT_SCALE=10 -cache: - directories: - - vendor -install: - - make dependencies -script: - - make lint - - make test - - make bench -after_success: - - make cover - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md deleted file mode 100644 index 17d5b49f3..000000000 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ /dev/null @@ -1,305 +0,0 @@ -# Changelog - -## v1.9.1 (06 Aug 2018) - -Bugfixes: - -* [#614][]: MapObjectEncoder should not ignore empty slices. - -## v1.9.0 (19 Jul 2018) - -Enhancements: -* [#602][]: Reduce number of allocations when logging with reflection. -* [#572][], [#606][]: Expose a registry for third-party logging sinks. - -Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and -@dimroc for their contributions to this release. - -## v1.8.0 (13 Apr 2018) - -Enhancements: -* [#508][]: Make log level configurable when redirecting the standard - library's logger. -* [#518][]: Add a logger that writes to a `*testing.TB`. -* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. - -Bugfixes: -* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. - -Thanks to @DiSiqueira and @djui for their contributions to this release. - -## v1.7.1 (25 Sep 2017) - -Bugfixes: -* [#504][]: Store strings when using AddByteString with the map encoder. - -## v1.7.0 (21 Sep 2017) - -Enhancements: - -* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user - to specify the level of the logged messages. - -## v1.6.0 (30 Aug 2017) - -Enhancements: - -* [#491][]: Omit zap stack frames from stacktraces. -* [#490][]: Add a `ContextMap` method to observer logs for simpler - field validation in tests. - -## v1.5.0 (22 Jul 2017) - -Enhancements: - -* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. -* [#465][]: Support user-supplied encoders for logger names. - -Bugfixes: - -* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. - -Thanks to @richard-tunein and @pavius for their contributions to this release. - -## v1.4.1 (08 Jun 2017) - -This release fixes two bugs. - -Bugfixes: - -* [#435][]: Support a variety of case conventions when unmarshaling levels. -* [#444][]: Fix a panic in the observer. - -## v1.4.0 (12 May 2017) - -This release adds a few small features and is fully backward-compatible. 
- -Enhancements: - -* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to - override the Unix-style default. -* [#425][]: Preserve time zones when logging times. -* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a - variety of operations a bit simpler. - -## v1.3.0 (25 Apr 2017) - -This release adds an enhancement to zap's testing helpers as well as the -ability to marshal an AtomicLevel. It is fully backward-compatible. - -Enhancements: - -* [#415][]: Add a substring-filtering helper to zap's observer. This is - particularly useful when testing the `SugaredLogger`. -* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. - -## v1.2.0 (13 Apr 2017) - -This release adds a gRPC compatibility wrapper. It is fully backward-compatible. - -Enhancements: - -* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements - `grpclog.Logger`. - -## v1.1.0 (31 Mar 2017) - -This release fixes two bugs and adds some enhancements to zap's testing helpers. -It is fully backward-compatible. - -Bugfixes: - -* [#385][]: Fix caller path trimming on Windows. -* [#396][]: Fix a panic when attempting to use non-existent directories with - zap's configuration struct. - -Enhancements: - -* [#386][]: Add filtering helpers to zaptest's observing logger. - -Thanks to @moitias for contributing to this release. - -## v1.0.0 (14 Mar 2017) - -This is zap's first stable release. All exported APIs are now final, and no -further breaking changes will be made in the 1.x release series. Anyone using a -semver-aware dependency manager should now pin to `^1`. - -Breaking changes: - -* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without - casting from `[]byte` to `string`. -* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, - `zap.Logger`, and `zap.SugaredLogger`. -* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to - clash with other testing helpers. - -Bugfixes: - -* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier - for tab-separated console output. -* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to - work with concurrency-safe `WriteSyncer` implementations. -* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux - systems. -* [#373][]: Report the correct caller from zap's standard library - interoperability wrappers. - -Enhancements: - -* [#348][]: Add a registry allowing third-party encodings to work with zap's - built-in `Config`. -* [#327][]: Make the representation of logger callers configurable (like times, - levels, and durations). -* [#376][]: Allow third-party encoders to use their own buffer pools, which - removes the last performance advantage that zap's encoders have over plugins. -* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple - `WriteSyncer`s and lock the result. -* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in - Go 1.9). -* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it - easier for particularly punctilious users to unit test their application's - logging. - -Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their -contributions to this release. - -## v1.0.0-rc.3 (7 Mar 2017) - -This is the third release candidate for zap's stable release. There are no -breaking changes. 
- -Bugfixes: - -* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs - rather than `[]uint8`. - -Enhancements: - -* [#307][]: Users can opt into colored output for log levels. -* [#353][]: In addition to hijacking the output of the standard library's - package-global logging functions, users can now construct a zap-backed - `log.Logger` instance. -* [#311][]: Frames from common runtime functions and some of zap's internal - machinery are now omitted from stacktraces. - -Thanks to @ansel1 and @suyash for their contributions to this release. - -## v1.0.0-rc.2 (21 Feb 2017) - -This is the second release candidate for zap's stable release. It includes two -breaking changes. - -Breaking changes: - -* [#316][]: Zap's global loggers are now fully concurrency-safe - (previously, users had to ensure that `ReplaceGlobals` was called before the - loggers were in use). However, they must now be accessed via the `L()` and - `S()` functions. Users can update their projects with - - ``` - gofmt -r "zap.L -> zap.L()" -w . - gofmt -r "zap.S -> zap.S()" -w . - ``` -* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid - JSON and YAML struct tags on all config structs. This release fixes the tags - and adds static analysis to prevent similar bugs in the future. - -Bugfixes: - -* [#321][]: Redirecting the standard library's `log` output now - correctly reports the logger's caller. - -Enhancements: - -* [#325][] and [#333][]: Zap now transparently supports non-standard, rich - errors like those produced by `github.com/pkg/errors`. -* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is - now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> - zap.NewNop()' -w .`. -* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a - more informative error. - -Thanks to @skipor and @chapsuk for their contributions to this release. - -## v1.0.0-rc.1 (14 Feb 2017) - -This is the first release candidate for zap's stable release. There are multiple -breaking changes and improvements from the pre-release version. Most notably: - -* **Zap's import path is now "go.uber.org/zap"** — all users will - need to update their code. -* User-facing types and functions remain in the `zap` package. Code relevant - largely to extension authors is now in the `zapcore` package. -* The `zapcore.Core` type makes it easy for third-party packages to use zap's - internals but provide a different user-facing API. -* `Logger` is now a concrete type instead of an interface. -* A less verbose (though slower) logging API is included by default. -* Package-global loggers `L` and `S` are included. -* A human-friendly console encoder is included. -* A declarative config struct allows common logger configurations to be managed - as configuration instead of code. -* Sampling is more accurate, and doesn't depend on the standard library's shared - timer heap. - -## v0.1.0-beta.1 (6 Feb 2017) - -This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and -upgrade at their leisure. Since this is the first tagged release, there are no -backward compatibility concerns and all functionality is new. - -Early zap adopters should pin to the 0.1.x minor version until they're ready to -upgrade to the upcoming stable release. 
- -[#316]: https://github.com/uber-go/zap/pull/316 -[#309]: https://github.com/uber-go/zap/pull/309 -[#317]: https://github.com/uber-go/zap/pull/317 -[#321]: https://github.com/uber-go/zap/pull/321 -[#325]: https://github.com/uber-go/zap/pull/325 -[#333]: https://github.com/uber-go/zap/pull/333 -[#326]: https://github.com/uber-go/zap/pull/326 -[#300]: https://github.com/uber-go/zap/pull/300 -[#339]: https://github.com/uber-go/zap/pull/339 -[#307]: https://github.com/uber-go/zap/pull/307 -[#353]: https://github.com/uber-go/zap/pull/353 -[#311]: https://github.com/uber-go/zap/pull/311 -[#366]: https://github.com/uber-go/zap/pull/366 -[#364]: https://github.com/uber-go/zap/pull/364 -[#371]: https://github.com/uber-go/zap/pull/371 -[#362]: https://github.com/uber-go/zap/pull/362 -[#369]: https://github.com/uber-go/zap/pull/369 -[#347]: https://github.com/uber-go/zap/pull/347 -[#373]: https://github.com/uber-go/zap/pull/373 -[#348]: https://github.com/uber-go/zap/pull/348 -[#327]: https://github.com/uber-go/zap/pull/327 -[#376]: https://github.com/uber-go/zap/pull/376 -[#346]: https://github.com/uber-go/zap/pull/346 -[#365]: https://github.com/uber-go/zap/pull/365 -[#372]: https://github.com/uber-go/zap/pull/372 -[#385]: https://github.com/uber-go/zap/pull/385 -[#396]: https://github.com/uber-go/zap/pull/396 -[#386]: https://github.com/uber-go/zap/pull/386 -[#402]: https://github.com/uber-go/zap/pull/402 -[#415]: https://github.com/uber-go/zap/pull/415 -[#416]: https://github.com/uber-go/zap/pull/416 -[#424]: https://github.com/uber-go/zap/pull/424 -[#425]: https://github.com/uber-go/zap/pull/425 -[#431]: https://github.com/uber-go/zap/pull/431 -[#435]: https://github.com/uber-go/zap/pull/435 -[#444]: https://github.com/uber-go/zap/pull/444 -[#477]: https://github.com/uber-go/zap/pull/477 -[#465]: https://github.com/uber-go/zap/pull/465 -[#460]: https://github.com/uber-go/zap/pull/460 -[#470]: https://github.com/uber-go/zap/pull/470 -[#487]: https://github.com/uber-go/zap/pull/487 -[#490]: https://github.com/uber-go/zap/pull/490 -[#491]: https://github.com/uber-go/zap/pull/491 -[#504]: https://github.com/uber-go/zap/pull/504 -[#508]: https://github.com/uber-go/zap/pull/508 -[#518]: https://github.com/uber-go/zap/pull/518 -[#577]: https://github.com/uber-go/zap/pull/577 -[#574]: https://github.com/uber-go/zap/pull/574 -[#602]: https://github.com/uber-go/zap/pull/602 -[#572]: https://github.com/uber-go/zap/pull/572 -[#606]: https://github.com/uber-go/zap/pull/606 -[#614]: https://github.com/uber-go/zap/pull/614 diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md deleted file mode 100644 index e327d9aa5..000000000 --- a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,75 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, -body size, disability, ethnicity, gender identity and expression, level of -experience, nationality, personal appearance, race, religion, or sexual -identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an -appointed representative at an online or offline event. Representation of a -project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at oss-conduct@uber.com. The project -team will review and investigate all complaints, and will respond in a way -that it deems appropriate to the circumstances. The project team is obligated -to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.4, available at -[http://contributor-covenant.org/version/1/4][version]. - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md deleted file mode 100644 index 9454bbaf0..000000000 --- a/vendor/go.uber.org/zap/CONTRIBUTING.md +++ /dev/null @@ -1,81 +0,0 @@ -# Contributing - -We'd love your help making zap the very best structured logging library in Go! - -If you'd like to add new exported APIs, please [open an issue][open-issue] -describing your proposal — discussing API changes ahead of time makes -pull request review much smoother. In your issue, pull request, and any other -communications, please remember to treat your fellow contributors with -respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. 
- -Note that you'll need to sign [Uber's Contributor License Agreement][cla] -before we can accept any of your contributions. If necessary, a bot will remind -you to accept the CLA when you open your pull request. - -## Setup - -[Fork][fork], then clone the repository: - -``` -mkdir -p $GOPATH/src/go.uber.org -cd $GOPATH/src/go.uber.org -git clone git@github.com:your_github_username/zap.git -cd zap -git remote add upstream https://github.com/uber-go/zap.git -git fetch upstream -``` - -Install zap's dependencies: - -``` -make dependencies -``` - -Make sure that the tests and the linters pass: - -``` -make test -make lint -``` - -If you're not using the minor version of Go specified in the Makefile's -`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is -fine, but it means that you'll only discover lint failures after you open your -pull request. - -## Making Changes - -Start by creating a new branch for your changes: - -``` -cd $GOPATH/src/go.uber.org/zap -git checkout master -git fetch upstream -git rebase upstream/master -git checkout -b cool_new_feature -``` - -Make your changes, then ensure that `make lint` and `make test` still pass. If -you're satisfied with your changes, push them to your fork. - -``` -git push origin cool_new_feature -``` - -Then use the GitHub UI to open a pull request. - -At this point, you're waiting on us to review your changes. We *try* to respond -to issues and pull requests within a few business days, and we may suggest some -improvements or alternatives. Once your changes are approved, one of the -project maintainers will merge them. - -We're much more likely to approve your changes if you: - -* Add tests for new functionality. -* Write a [good commit message][commit-message]. -* Maintain backward compatibility. - -[fork]: https://github.com/uber-go/zap/fork -[open-issue]: https://github.com/uber-go/zap/issues/new -[cla]: https://cla-assistant.io/uber-go/zap -[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md deleted file mode 100644 index 4256d35c7..000000000 --- a/vendor/go.uber.org/zap/FAQ.md +++ /dev/null @@ -1,155 +0,0 @@ -# Frequently Asked Questions - -## Design - -### Why spend so much effort on logger performance? - -Of course, most applications won't notice the impact of a slow logger: they -already take tens or hundreds of milliseconds for each operation, so an extra -millisecond doesn't matter. - -On the other hand, why *not* make structured logging fast? The `SugaredLogger` -isn't any harder to use than other logging packages, and the `Logger` makes -structured logging possible in performance-sensitive contexts. Across a fleet -of Go microservices, making each application even slightly more efficient adds -up quickly. - -### Why aren't `Logger` and `SugaredLogger` interfaces? - -Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and -`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points -out][go-proverbs], "The bigger the interface, the weaker the abstraction." -Interfaces are also rigid — *any* change requires releasing a new major -version, since it breaks all third-party implementations. - -Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much -abstraction, and it lets us add methods without introducing breaking changes. -Your applications should define and depend upon an interface that includes -just the methods you use. 
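
As a hedged illustration of that last recommendation, a sketch of an application-owned interface (the `appLogger` name and the `run` helper are made up for this example; only `zap.NewProduction` and the `SugaredLogger` methods come from zap itself):

```go
package main

import "go.uber.org/zap"

// appLogger is a hypothetical application-owned interface that lists only the
// methods this program actually calls; *zap.SugaredLogger satisfies it.
type appLogger interface {
	Infow(msg string, keysAndValues ...interface{})
	Errorw(msg string, keysAndValues ...interface{})
}

func run(log appLogger) {
	log.Infow("starting", "version", "v1.0.0")
}

func main() {
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	run(logger.Sugar()) // *zap.SugaredLogger implements appLogger
}
```
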
- -### Why sample application logs? - -Applications often experience runs of errors, either because of a bug or -because of a misbehaving user. Logging errors is usually a good idea, but it -can easily make this bad situation worse: not only is your application coping -with a flood of errors, it's also spending extra CPU cycles and I/O logging -those errors. Since writes are typically serialized, logging limits throughput -when you need it most. - -Sampling fixes this problem by dropping repetitive log entries. Under normal -conditions, your application writes out every entry. When similar entries are -logged hundreds or thousands of times each second, though, zap begins dropping -duplicates to preserve throughput. - -### Why do the structured logging APIs take a message in addition to fields? - -Subjectively, we find it helpful to accompany structured context with a brief -description. This isn't critical during development, but it makes debugging -and operating unfamiliar systems much easier. - -More concretely, zap's sampling algorithm uses the message to identify -duplicate entries. In our experience, this is a practical middle ground -between random sampling (which often drops the exact entry that you need while -debugging) and hashing the complete entry (which is prohibitively expensive). - -### Why include package-global loggers? - -Since so many other logging packages include a global logger, many -applications aren't designed to accept loggers as explicit parameters. -Changing function signatures is often a breaking change, so zap includes -global loggers to simplify migration. - -Avoid them where possible. - -### Why include dedicated Panic and Fatal log levels? - -In general, application code should handle errors gracefully instead of using -`panic` or `os.Exit`. However, every rule has exceptions, and it's common to -crash when an error is truly unrecoverable. To avoid losing any information -— especially the reason for the crash — the logger must flush any -buffered entries before the process exits. - -Zap makes this easy by offering `Panic` and `Fatal` logging methods that -automatically flush before exiting. Of course, this doesn't guarantee that -logs will never be lost, but it eliminates a common error. - -See the discussion in uber-go/zap#207 for more details. - -### What's `DPanic`? - -`DPanic` stands for "panic in development." In development, it logs at -`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to -catch errors that are theoretically possible, but shouldn't actually happen, -*without* crashing in production. - -If you've ever written code like this, you need `DPanic`: - -```go -if err != nil { - panic(fmt.Sprintf("shouldn't ever get here: %v", err)) -} -``` - -## Installation - -### What does the error `expects import "go.uber.org/zap"` mean? - -Either zap was installed incorrectly or you're referencing the wrong package -name in your code. - -Zap's source code happens to be hosted on GitHub, but the [import -path][import-path] is `go.uber.org/zap`. This gives us, the project -maintainers, the freedom to move the source code if necessary. However, it -means that you need to take a little care when installing and using the -package. - -If you follow two simple rules, everything should work: install zap with `go -get -u go.uber.org/zap`, and always import it in your code with `import -"go.uber.org/zap"`. Your code shouldn't contain *any* references to -`github.com/uber-go/zap`. - -## Usage - -### Does zap support log rotation? 
- -Zap doesn't natively support rotating log files, since we prefer to leave this -to an external program like `logrotate`. - -However, it's easy to integrate a log rotation package like -[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. - -```go -// lumberjack.Logger is already safe for concurrent use, so we don't need to -// lock it. -w := zapcore.AddSync(&lumberjack.Logger{ - Filename: "/var/log/myapp/foo.log", - MaxSize: 500, // megabytes - MaxBackups: 3, - MaxAge: 28, // days -}) -core := zapcore.NewCore( - zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), - w, - zap.InfoLevel, -) -logger := zap.New(core) -``` - -## Extensions - -We'd love to support every logging need within zap itself, but we're only -familiar with a handful of log ingestion systems, flag-parsing packages, and -the like. Rather than merging code that we can't effectively debug and -support, we'd rather grow an ecosystem of zap extensions. - -We're aware of the following extensions, but haven't used them ourselves: - -| Package | Integration | -| --- | --- | -| `github.com/tchap/zapext` | Sentry, syslog | -| `github.com/fgrosse/zaptest` | Ginkgo | -| `github.com/blendle/zapdriver` | Stackdriver | - -[go-proverbs]: https://go-proverbs.github.io/ -[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths -[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile deleted file mode 100644 index ef7893b3b..000000000 --- a/vendor/go.uber.org/zap/Makefile +++ /dev/null @@ -1,76 +0,0 @@ -export GO15VENDOREXPERIMENT=1 - -BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem -PKGS ?= $(shell glide novendor) -# Many Go tools take file globs or directories as arguments instead of packages. -PKG_FILES ?= *.go zapcore benchmarks buffer zapgrpc zaptest zaptest/observer internal/bufferpool internal/exit internal/color internal/ztest - -# The linting tools evolve with each Go version, so run them only on the latest -# stable release. -GO_VERSION := $(shell go version | cut -d " " -f 3) -GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) -LINTABLE_MINOR_VERSIONS := 10 -ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) -SHOULD_LINT := true -endif - - -.PHONY: all -all: lint test - -.PHONY: dependencies -dependencies: - @echo "Installing Glide and locked dependencies..." - glide --version || go get -u -f github.com/Masterminds/glide - glide install - @echo "Installing test dependencies..." - go install ./vendor/github.com/axw/gocov/gocov - go install ./vendor/github.com/mattn/goveralls -ifdef SHOULD_LINT - @echo "Installing golint..." - go install ./vendor/github.com/golang/lint/golint -else - @echo "Not installing golint, since we don't expect to lint on" $(GO_VERSION) -endif - -# Disable printf-like invocation checking due to testify.assert.Error() -VET_RULES := -printf=false - -.PHONY: lint -lint: -ifdef SHOULD_LINT - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(PKG_FILES) 2>&1 | tee lint.log - @echo "Installing test dependencies for vet..." - @go test -i $(PKGS) - @echo "Checking vet..." - @$(foreach dir,$(PKG_FILES),go tool vet $(VET_RULES) $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking lint..." - @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log - @echo "Checking for license headers..." 
- @./check_license.sh | tee -a lint.log - @[ ! -s lint.log ] -else - @echo "Skipping linters on" $(GO_VERSION) -endif - -.PHONY: test -test: - go test -race $(PKGS) - -.PHONY: cover -cover: - ./scripts/cover.sh $(PKGS) - -.PHONY: bench -BENCH ?= . -bench: - @$(foreach pkg,$(PKGS),go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) $(pkg);) - -.PHONY: updatereadme -updatereadme: - rm -f README.md - cat .readme.tmpl | go run internal/readme/readme.go > README.md diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md deleted file mode 100644 index f4fd1cb44..000000000 --- a/vendor/go.uber.org/zap/README.md +++ /dev/null @@ -1,136 +0,0 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Blazing fast, structured, leveled logging in Go. - -## Installation - -`go get -u go.uber.org/zap` - -Note that zap only supports the two most recent minor versions of Go. - -## Quick Start - -In contexts where performance is nice, but not critical, use the -`SugaredLogger`. It's 4-10x faster than other structured logging -packages and includes both structured and `printf`-style APIs. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() // flushes buffer, if any -sugar := logger.Sugar() -sugar.Infow("failed to fetch URL", - // Structured context as loosely typed key-value pairs. - "url", url, - "attempt", 3, - "backoff", time.Second, -) -sugar.Infof("Failed to fetch URL: %s", url) -``` - -When performance and type safety are critical, use the `Logger`. It's even -faster than the `SugaredLogger` and allocates far less, but it only supports -structured logging. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() -logger.Info("failed to fetch URL", - // Structured context as strongly typed Field values. - zap.String("url", url), - zap.Int("attempt", 3), - zap.Duration("backoff", time.Second), -) -``` - -See the [documentation][doc] and [FAQ](FAQ.md) for more details. - -## Performance - -For applications that log in the hot path, reflection-based serialization and -string formatting are prohibitively expensive — they're CPU-intensive -and make many small allocations. Put differently, using `encoding/json` and -`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. - -Zap takes a different approach. It includes a reflection-free, zero-allocation -JSON encoder, and the base `Logger` strives to avoid serialization overhead -and allocations wherever possible. By building the high-level `SugaredLogger` -on that foundation, zap lets users *choose* when they need to count every -allocation and when they'd prefer a more familiar, loosely typed API. - -As measured by its own [benchmarking suite][], not only is zap more performant -than comparable structured logging packages — it's also faster than the -standard library. 
Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) - -Log a message and 10 fields: - -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 3131 ns/op | 5 allocs/op | -| :zap: zap (sugared) | 4173 ns/op | 21 allocs/op | -| zerolog | 16154 ns/op | 90 allocs/op | -| lion | 16341 ns/op | 111 allocs/op | -| go-kit | 17049 ns/op | 126 allocs/op | -| logrus | 23662 ns/op | 142 allocs/op | -| log15 | 36351 ns/op | 149 allocs/op | -| apex/log | 42530 ns/op | 126 allocs/op | - -Log a message with a logger that already has 10 fields of context: - -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 380 ns/op | 0 allocs/op | -| :zap: zap (sugared) | 564 ns/op | 2 allocs/op | -| zerolog | 321 ns/op | 0 allocs/op | -| lion | 7092 ns/op | 39 allocs/op | -| go-kit | 20226 ns/op | 115 allocs/op | -| logrus | 22312 ns/op | 130 allocs/op | -| log15 | 28788 ns/op | 79 allocs/op | -| apex/log | 42063 ns/op | 115 allocs/op | - -Log a static string, without any context or `printf`-style templating: - -| Package | Time | Objects Allocated | -| :--- | :---: | :---: | -| :zap: zap | 361 ns/op | 0 allocs/op | -| :zap: zap (sugared) | 534 ns/op | 2 allocs/op | -| zerolog | 323 ns/op | 0 allocs/op | -| standard library | 575 ns/op | 2 allocs/op | -| go-kit | 922 ns/op | 13 allocs/op | -| lion | 1413 ns/op | 10 allocs/op | -| logrus | 2291 ns/op | 27 allocs/op | -| apex/log | 3690 ns/op | 11 allocs/op | -| log15 | 5954 ns/op | 26 allocs/op | - -## Development Status: Stable - -All APIs are finalized, and no breaking changes will be made in the 1.x series -of releases. Users of semver-aware dependency management systems should pin -zap to `^1`. - -## Contributing - -We encourage and support an active, healthy community of contributors — -including you! Details are in the [contribution guide](CONTRIBUTING.md) and -the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on -issues and pull requests, but you can also report any negative conduct to -oss-conduct@uber.com. That email list is a private, safe space; even the zap -maintainers don't have access, so don't hesitate to hold us to a high -standard. - -
- -Released under the [MIT License](LICENSE.txt). - -1 In particular, keep in mind that we may be -benchmarking against slightly older versions of other packages. Versions are -pinned in zap's [glide.lock][] file. [↩](#anchor-versions) - -[doc-img]: https://godoc.org/go.uber.org/zap?status.svg -[doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.org/uber-go/zap -[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/zap -[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock diff --git a/vendor/go.uber.org/zap/check_license.sh b/vendor/go.uber.org/zap/check_license.sh deleted file mode 100755 index 345ac8b89..000000000 --- a/vendor/go.uber.org/zap/check_license.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -e - -ERROR_COUNT=0 -while read -r file -do - case "$(head -1 "${file}")" in - *"Copyright (c) "*" Uber Technologies, Inc.") - # everything's cool - ;; - *) - echo "$file is missing license header." - (( ERROR_COUNT++ )) - ;; - esac -done < <(git ls-files "*\.go") - -exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/glide.lock b/vendor/go.uber.org/zap/glide.lock deleted file mode 100644 index 881b462c0..000000000 --- a/vendor/go.uber.org/zap/glide.lock +++ /dev/null @@ -1,76 +0,0 @@ -hash: f073ba522c06c88ea3075bde32a8aaf0969a840a66cab6318a0897d141ffee92 -updated: 2017-07-22T18:06:49.598185334-07:00 -imports: -- name: go.uber.org/atomic - version: 4e336646b2ef9fc6e47be8e21594178f98e5ebcf -- name: go.uber.org/multierr - version: 3c4937480c32f4c13a875a1829af76c98ca3d40a -testImports: -- name: github.com/apex/log - version: d9b960447bfa720077b2da653cc79e533455b499 - subpackages: - - handlers/json -- name: github.com/axw/gocov - version: 3a69a0d2a4ef1f263e2d92b041a69593d6964fe8 - subpackages: - - gocov -- name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew -- name: github.com/fatih/color - version: 62e9147c64a1ed519147b62a56a14e83e2be02c1 -- name: github.com/go-kit/kit - version: e10f5bf035be9af21fd5b2fb4469d5716c6ab07d - subpackages: - - log -- name: github.com/go-logfmt/logfmt - version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 -- name: github.com/go-stack/stack - version: 54be5f394ed2c3e19dac9134a40a95ba5a017f7b -- name: github.com/golang/lint - version: c5fb716d6688a859aae56d26d3e6070808df29f7 - subpackages: - - golint -- name: github.com/kr/logfmt - version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 -- name: github.com/mattn/go-colorable - version: 3fa8c76f9daed4067e4a806fb7e4dc86455c6d6a -- name: github.com/mattn/go-isatty - version: fc9e8d8ef48496124e79ae0df75490096eccf6fe -- name: github.com/mattn/goveralls - version: 6efce81852ad1b7567c17ad71b03aeccc9dd9ae0 -- name: github.com/pborman/uuid - version: e790cca94e6cc75c7064b1332e63811d4aae1a53 -- name: github.com/pkg/errors - version: 645ef00459ed84a119197bfb8d8205042c6df63d -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/rs/zerolog - version: eed4c2b94d945e0b2456ad6aa518a443986b5f22 -- name: github.com/satori/go.uuid - version: 5bf94b69c6b68ee1b541973bb8e1144db23a194b -- name: github.com/sirupsen/logrus - version: 7dd06bf38e1e13df288d471a57d5adbac106be9e -- name: github.com/stretchr/testify - version: f6abca593680b2315d2075e0f5e2a9751e3f431a - subpackages: - - assert 
- - require -- name: go.pedge.io/lion - version: 87958e8713f1fa138d993087133b97e976642159 -- name: golang.org/x/sys - version: c4489faa6e5ab84c0ef40d6ee878f7a030281f0f - subpackages: - - unix -- name: golang.org/x/tools - version: 496819729719f9d07692195e0a94d6edd2251389 - subpackages: - - cover -- name: gopkg.in/inconshreveable/log15.v2 - version: b105bd37f74e5d9dc7b6ad7806715c7a2b83fd3f - subpackages: - - stack - - term diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml deleted file mode 100644 index 94412594c..000000000 --- a/vendor/go.uber.org/zap/glide.yaml +++ /dev/null @@ -1,35 +0,0 @@ -package: go.uber.org/zap -license: MIT -import: -- package: go.uber.org/atomic - version: ^1 -- package: go.uber.org/multierr - version: ^1 -testImport: -- package: github.com/satori/go.uuid -- package: github.com/sirupsen/logrus -- package: github.com/apex/log - subpackages: - - handlers/json -- package: github.com/go-kit/kit - subpackages: - - log -- package: github.com/stretchr/testify - subpackages: - - assert - - require -- package: gopkg.in/inconshreveable/log15.v2 -- package: github.com/mattn/goveralls -- package: github.com/pborman/uuid -- package: github.com/pkg/errors -- package: go.pedge.io/lion -- package: github.com/rs/zerolog -- package: golang.org/x/tools - subpackages: - - cover -- package: github.com/golang/lint - subpackages: - - golint -- package: github.com/axw/gocov - subpackages: - - gocov diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go new file mode 100644 index 000000000..cd0a8ac15 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atom provides integer codes (also known as atoms) for a fixed set of +// frequently occurring HTML strings: tag names and attribute keys such as "p" +// and "id". +// +// Sharing an atom's name between all elements with the same tag can result in +// fewer string allocations when tokenizing and parsing HTML. Integer +// comparisons are also generally faster than string comparisons. +// +// The value of an atom's particular code is not guaranteed to stay the same +// between versions of this package. Neither is any ordering guaranteed: +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to +// be dense. The only guarantees are that e.g. looking up "div" will yield +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. +package atom // import "golang.org/x/net/html/atom" + +// Atom is an integer code for a string. The zero value maps to "". +type Atom uint32 + +// String returns the atom's name. +func (a Atom) String() string { + start := uint32(a >> 8) + n := uint32(a & 0xff) + if start+n > uint32(len(atomText)) { + return "" + } + return atomText[start : start+n] +} + +func (a Atom) string() string { + return atomText[a>>8 : a>>8+a&0xff] +} + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s []byte) uint32 { + for i := range s { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +func match(s string, t []byte) bool { + for i, c := range t { + if s[i] != c { + return false + } + } + return true +} + +// Lookup returns the atom whose name is s. It returns zero if there is no +// such atom. The lookup is case sensitive. 
+func Lookup(s []byte) Atom { + if len(s) == 0 || len(s) > maxAtomLen { + return 0 + } + h := fnv(hash0, s) + if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + return 0 +} + +// String returns a string whose contents are equal to s. In that sense, it is +// equivalent to string(s) but may be more efficient. +func String(s []byte) string { + if a := Lookup(s); a != 0 { + return a.String() + } + return string(s) +} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go new file mode 100644 index 000000000..5d052781b --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/gen.go @@ -0,0 +1,712 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go +//go:generate go run gen.go -test + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "math/rand" + "os" + "sort" + "strings" +) + +// identifier converts s to a Go exported identifier. +// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". +func identifier(s string) string { + b := make([]byte, 0, len(s)) + cap := true + for _, c := range s { + if c == '-' { + cap = true + continue + } + if cap && 'a' <= c && c <= 'z' { + c -= 'a' - 'A' + } + cap = false + b = append(b, byte(c)) + } + return string(b) +} + +var test = flag.Bool("test", false, "generate table_test.go") + +func genFile(name string, buf *bytes.Buffer) { + b, err := format.Source(buf.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile(name, b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main() { + flag.Parse() + + var all []string + all = append(all, elements...) + all = append(all, attributes...) + all = append(all, eventHandlers...) + all = append(all, extra...) + sort.Strings(all) + + // uniq - lists have dups + w := 0 + for _, s := range all { + if w == 0 || all[w-1] != s { + all[w] = s + w++ + } + } + all = all[:w] + + if *test { + var buf bytes.Buffer + fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") + fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") + fmt.Fprintln(&buf, "package atom\n") + fmt.Fprintln(&buf, "var testAtomList = []string{") + for _, s := range all { + fmt.Fprintf(&buf, "\t%q,\n", s) + } + fmt.Fprintln(&buf, "}") + + genFile("table_test.go", &buf) + return + } + + // Find hash that minimizes table size. + var best *table + for i := 0; i < 1000000; i++ { + if best != nil && 1<<(best.k-1) < len(all) { + break + } + h := rand.Uint32() + for k := uint(0); k <= 16; k++ { + if best != nil && k >= best.k { + break + } + var t table + if t.init(h, k, all) { + best = &t + break + } + } + } + if best == nil { + fmt.Fprintf(os.Stderr, "failed to construct string table\n") + os.Exit(1) + } + + // Lay out strings, using overlaps when possible. + layout := append([]string{}, all...) + + // Remove strings that are substrings of other strings + for changed := true; changed; { + changed = false + for i, s := range layout { + if s == "" { + continue + } + for j, t := range layout { + if i != j && t != "" && strings.Contains(s, t) { + changed = true + layout[j] = "" + } + } + } + } + + // Join strings where one suffix matches another prefix. 
+ for { + // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], + // maximizing overlap length k. + besti := -1 + bestj := -1 + bestk := 0 + for i, s := range layout { + if s == "" { + continue + } + for j, t := range layout { + if i == j { + continue + } + for k := bestk + 1; k <= len(s) && k <= len(t); k++ { + if s[len(s)-k:] == t[:k] { + besti = i + bestj = j + bestk = k + } + } + } + } + if bestk > 0 { + layout[besti] += layout[bestj][bestk:] + layout[bestj] = "" + continue + } + break + } + + text := strings.Join(layout, "") + + atom := map[string]uint32{} + for _, s := range all { + off := strings.Index(text, s) + if off < 0 { + panic("lost string " + s) + } + atom[s] = uint32(off<<8 | len(s)) + } + + var buf bytes.Buffer + // Generate the Go code. + fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") + fmt.Fprintln(&buf, "//go:generate go run gen.go\n") + fmt.Fprintln(&buf, "package atom\n\nconst (") + + // compute max len + maxLen := 0 + for _, s := range all { + if maxLen < len(s) { + maxLen = len(s) + } + fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s]) + } + fmt.Fprintln(&buf, ")\n") + + fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0) + fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen) + + fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k) + for i, s := range best.tab { + if s == "" { + continue + } + fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s) + } + fmt.Fprintf(&buf, "}\n") + datasize := (1 << best.k) * 4 + + fmt.Fprintln(&buf, "const atomText =") + textsize := len(text) + for len(text) > 60 { + fmt.Fprintf(&buf, "\t%q +\n", text[:60]) + text = text[60:] + } + fmt.Fprintf(&buf, "\t%q\n\n", text) + + genFile("table.go", &buf) + + fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) +} + +type byLen []string + +func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } +func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byLen) Len() int { return len(x) } + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s string) uint32 { + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// A table represents an attempt at constructing the lookup table. +// The lookup table uses cuckoo hashing, meaning that each string +// can be found in one of two positions. +type table struct { + h0 uint32 + k uint + mask uint32 + tab []string +} + +// hash returns the two hashes for s. +func (t *table) hash(s string) (h1, h2 uint32) { + h := fnv(t.h0, s) + h1 = h & t.mask + h2 = (h >> 16) & t.mask + return +} + +// init initializes the table with the given parameters. +// h0 is the initial hash value, +// k is the number of bits of hash value to use, and +// x is the list of strings to store in the table. +// init returns false if the table cannot be constructed. +func (t *table) init(h0 uint32, k uint, x []string) bool { + t.h0 = h0 + t.k = k + t.tab = make([]string, 1<<k) + t.mask = 1<<k - 1 + for _, s := range x { + if !t.insert(s) { + return false + } + } + return true +} + +// insert inserts s in the table. +func (t *table) insert(s string) bool { + h1, h2 := t.hash(s) + if t.tab[h1] == "" { + t.tab[h1] = s + return true + } + if t.tab[h2] == "" { + t.tab[h2] = s + return true + } + if t.push(h1, 0) { + t.tab[h1] = s + return true + } + if t.push(h2, 0) { + t.tab[h2] = s + return true + } + return false +} + +// push attempts to push aside the entry in slot i. +func (t *table) push(i uint32, depth int) bool { + if depth > len(t.tab) { + return false + } + s := t.tab[i] + h1, h2 := t.hash(s) + j := h1 + h2 - i + if t.tab[j] != "" && !t.push(j, depth+1) { + return false + } + t.tab[j] = s + return true +} + +// The lists of element names and attribute keys were taken from +// https://html.spec.whatwg.org/multipage/indices.html#index +// as of the "HTML Living Standard - Last Updated 16 April 2018" version.
+ +// "command", "keygen" and "menuitem" have been removed from the spec, +// but are kept here for backwards compatibility. +var elements = []string{ + "a", + "abbr", + "address", + "area", + "article", + "aside", + "audio", + "b", + "base", + "bdi", + "bdo", + "blockquote", + "body", + "br", + "button", + "canvas", + "caption", + "cite", + "code", + "col", + "colgroup", + "command", + "data", + "datalist", + "dd", + "del", + "details", + "dfn", + "dialog", + "div", + "dl", + "dt", + "em", + "embed", + "fieldset", + "figcaption", + "figure", + "footer", + "form", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "header", + "hgroup", + "hr", + "html", + "i", + "iframe", + "img", + "input", + "ins", + "kbd", + "keygen", + "label", + "legend", + "li", + "link", + "main", + "map", + "mark", + "menu", + "menuitem", + "meta", + "meter", + "nav", + "noscript", + "object", + "ol", + "optgroup", + "option", + "output", + "p", + "param", + "picture", + "pre", + "progress", + "q", + "rp", + "rt", + "ruby", + "s", + "samp", + "script", + "section", + "select", + "slot", + "small", + "source", + "span", + "strong", + "style", + "sub", + "summary", + "sup", + "table", + "tbody", + "td", + "template", + "textarea", + "tfoot", + "th", + "thead", + "time", + "title", + "tr", + "track", + "u", + "ul", + "var", + "video", + "wbr", +} + +// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 +// +// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup", +// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec, +// but are kept here for backwards compatibility. +var attributes = []string{ + "abbr", + "accept", + "accept-charset", + "accesskey", + "action", + "allowfullscreen", + "allowpaymentrequest", + "allowusermedia", + "alt", + "as", + "async", + "autocomplete", + "autofocus", + "autoplay", + "challenge", + "charset", + "checked", + "cite", + "class", + "color", + "cols", + "colspan", + "command", + "content", + "contenteditable", + "contextmenu", + "controls", + "coords", + "crossorigin", + "data", + "datetime", + "default", + "defer", + "dir", + "dirname", + "disabled", + "download", + "draggable", + "dropzone", + "enctype", + "for", + "form", + "formaction", + "formenctype", + "formmethod", + "formnovalidate", + "formtarget", + "headers", + "height", + "hidden", + "high", + "href", + "hreflang", + "http-equiv", + "icon", + "id", + "inputmode", + "integrity", + "is", + "ismap", + "itemid", + "itemprop", + "itemref", + "itemscope", + "itemtype", + "keytype", + "kind", + "label", + "lang", + "list", + "loop", + "low", + "manifest", + "max", + "maxlength", + "media", + "mediagroup", + "method", + "min", + "minlength", + "multiple", + "muted", + "name", + "nomodule", + "nonce", + "novalidate", + "open", + "optimum", + "pattern", + "ping", + "placeholder", + "playsinline", + "poster", + "preload", + "radiogroup", + "readonly", + "referrerpolicy", + "rel", + "required", + "reversed", + "rows", + "rowspan", + "sandbox", + "spellcheck", + "scope", + "scoped", + "seamless", + "selected", + "shape", + "size", + "sizes", + "sortable", + "sorted", + "slot", + "span", + "spellcheck", + "src", + "srcdoc", + "srclang", + "srcset", + "start", + "step", + "style", + "tabindex", + "target", + "title", + "translate", + "type", + "typemustmatch", + "updateviacache", + "usemap", + "value", + "width", + "workertype", + "wrap", +} + +// "onautocomplete", "onautocompleteerror", "onmousewheel", +// "onshow" and "onsort" 
have been removed from the spec, +// but are kept here for backwards compatibility. +var eventHandlers = []string{ + "onabort", + "onautocomplete", + "onautocompleteerror", + "onauxclick", + "onafterprint", + "onbeforeprint", + "onbeforeunload", + "onblur", + "oncancel", + "oncanplay", + "oncanplaythrough", + "onchange", + "onclick", + "onclose", + "oncontextmenu", + "oncopy", + "oncuechange", + "oncut", + "ondblclick", + "ondrag", + "ondragend", + "ondragenter", + "ondragexit", + "ondragleave", + "ondragover", + "ondragstart", + "ondrop", + "ondurationchange", + "onemptied", + "onended", + "onerror", + "onfocus", + "onhashchange", + "oninput", + "oninvalid", + "onkeydown", + "onkeypress", + "onkeyup", + "onlanguagechange", + "onload", + "onloadeddata", + "onloadedmetadata", + "onloadend", + "onloadstart", + "onmessage", + "onmessageerror", + "onmousedown", + "onmouseenter", + "onmouseleave", + "onmousemove", + "onmouseout", + "onmouseover", + "onmouseup", + "onmousewheel", + "onwheel", + "onoffline", + "ononline", + "onpagehide", + "onpageshow", + "onpaste", + "onpause", + "onplay", + "onplaying", + "onpopstate", + "onprogress", + "onratechange", + "onreset", + "onresize", + "onrejectionhandled", + "onscroll", + "onsecuritypolicyviolation", + "onseeked", + "onseeking", + "onselect", + "onshow", + "onsort", + "onstalled", + "onstorage", + "onsubmit", + "onsuspend", + "ontimeupdate", + "ontoggle", + "onunhandledrejection", + "onunload", + "onvolumechange", + "onwaiting", +} + +// extra are ad-hoc values not covered by any of the lists above. +var extra = []string{ + "acronym", + "align", + "annotation", + "annotation-xml", + "applet", + "basefont", + "bgsound", + "big", + "blink", + "center", + "color", + "desc", + "face", + "font", + "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. + "foreignobject", + "frame", + "frameset", + "image", + "isindex", + "listing", + "malignmark", + "marquee", + "math", + "mglyph", + "mi", + "mn", + "mo", + "ms", + "mtext", + "nobr", + "noembed", + "noframes", + "plaintext", + "prompt", + "public", + "rb", + "rtc", + "spacer", + "strike", + "svg", + "system", + "tt", + "xmp", +} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go new file mode 100644 index 000000000..2a938864c --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -0,0 +1,783 @@ +// Code generated by go generate gen.go; DO NOT EDIT. 
+ +//go:generate go run gen.go + +package atom + +const ( + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x1a06 + AcceptCharset Atom = 0x1a0e + Accesskey Atom = 0x2c09 + Acronym Atom = 0xaa07 + Action Atom = 0x27206 + Address Atom = 0x6f307 + Align Atom = 0xb105 + Allowfullscreen Atom = 0x2080f + Allowpaymentrequest Atom = 0xc113 + Allowusermedia Atom = 0xdd0e + Alt Atom = 0xf303 + Annotation Atom = 0x1c90a + AnnotationXml Atom = 0x1c90e + Applet Atom = 0x31906 + Area Atom = 0x35604 + Article Atom = 0x3fc07 + As Atom = 0x3c02 + Aside Atom = 0x10705 + Async Atom = 0xff05 + Audio Atom = 0x11505 + Autocomplete Atom = 0x2780c + Autofocus Atom = 0x12109 + Autoplay Atom = 0x13c08 + B Atom = 0x101 + Base Atom = 0x3b04 + Basefont Atom = 0x3b08 + Bdi Atom = 0xba03 + Bdo Atom = 0x14b03 + Bgsound Atom = 0x15e07 + Big Atom = 0x17003 + Blink Atom = 0x17305 + Blockquote Atom = 0x1870a + Body Atom = 0x2804 + Br Atom = 0x202 + Button Atom = 0x19106 + Canvas Atom = 0x10306 + Caption Atom = 0x23107 + Center Atom = 0x22006 + Challenge Atom = 0x29b09 + Charset Atom = 0x2107 + Checked Atom = 0x47907 + Cite Atom = 0x19c04 + Class Atom = 0x56405 + Code Atom = 0x5c504 + Col Atom = 0x1ab03 + Colgroup Atom = 0x1ab08 + Color Atom = 0x1bf05 + Cols Atom = 0x1c404 + Colspan Atom = 0x1c407 + Command Atom = 0x1d707 + Content Atom = 0x58b07 + Contenteditable Atom = 0x58b0f + Contextmenu Atom = 0x3800b + Controls Atom = 0x1de08 + Coords Atom = 0x1ea06 + Crossorigin Atom = 0x1fb0b + Data Atom = 0x4a504 + Datalist Atom = 0x4a508 + Datetime Atom = 0x2b808 + Dd Atom = 0x2d702 + Default Atom = 0x10a07 + Defer Atom = 0x5c705 + Del Atom = 0x45203 + Desc Atom = 0x56104 + Details Atom = 0x7207 + Dfn Atom = 0x8703 + Dialog Atom = 0xbb06 + Dir Atom = 0x9303 + Dirname Atom = 0x9307 + Disabled Atom = 0x16408 + Div Atom = 0x16b03 + Dl Atom = 0x5e602 + Download Atom = 0x46308 + Draggable Atom = 0x17a09 + Dropzone Atom = 0x40508 + Dt Atom = 0x64b02 + Em Atom = 0x6e02 + Embed Atom = 0x6e05 + Enctype Atom = 0x28d07 + Face Atom = 0x21e04 + Fieldset Atom = 0x22608 + Figcaption Atom = 0x22e0a + Figure Atom = 0x24806 + Font Atom = 0x3f04 + Footer Atom = 0xf606 + For Atom = 0x25403 + ForeignObject Atom = 0x2540d + Foreignobject Atom = 0x2610d + Form Atom = 0x26e04 + Formaction Atom = 0x26e0a + Formenctype Atom = 0x2890b + Formmethod Atom = 0x2a40a + Formnovalidate Atom = 0x2ae0e + Formtarget Atom = 0x2c00a + Frame Atom = 0x8b05 + Frameset Atom = 0x8b08 + H1 Atom = 0x15c02 + H2 Atom = 0x2de02 + H3 Atom = 0x30d02 + H4 Atom = 0x34502 + H5 Atom = 0x34f02 + H6 Atom = 0x64d02 + Head Atom = 0x33104 + Header Atom = 0x33106 + Headers Atom = 0x33107 + Height Atom = 0x5206 + Hgroup Atom = 0x2ca06 + Hidden Atom = 0x2d506 + High Atom = 0x2db04 + Hr Atom = 0x15702 + Href Atom = 0x2e004 + Hreflang Atom = 0x2e008 + Html Atom = 0x5604 + HttpEquiv Atom = 0x2e80a + I Atom = 0x601 + Icon Atom = 0x58a04 + Id Atom = 0x10902 + Iframe Atom = 0x2fc06 + Image Atom = 0x30205 + Img Atom = 0x30703 + Input Atom = 0x44b05 + Inputmode Atom = 0x44b09 + Ins Atom = 0x20403 + Integrity Atom = 0x23f09 + Is Atom = 0x16502 + Isindex Atom = 0x30f07 + Ismap Atom = 0x31605 + Itemid Atom = 0x38b06 + Itemprop Atom = 0x19d08 + Itemref Atom = 0x3cd07 + Itemscope Atom = 0x67109 + Itemtype Atom = 0x31f08 + Kbd Atom = 0xb903 + Keygen Atom = 0x3206 + Keytype Atom = 0xd607 + Kind Atom = 0x17704 + Label Atom = 0x5905 + Lang Atom = 0x2e404 + Legend Atom = 0x18106 + Li Atom = 0xb202 + Link Atom = 0x17404 + List Atom = 0x4a904 + Listing Atom = 0x4a907 + Loop Atom = 0x5d04 + Low Atom = 0xc303 + 
Main Atom = 0x1004 + Malignmark Atom = 0xb00a + Manifest Atom = 0x6d708 + Map Atom = 0x31803 + Mark Atom = 0xb604 + Marquee Atom = 0x32707 + Math Atom = 0x32e04 + Max Atom = 0x33d03 + Maxlength Atom = 0x33d09 + Media Atom = 0xe605 + Mediagroup Atom = 0xe60a + Menu Atom = 0x38704 + Menuitem Atom = 0x38708 + Meta Atom = 0x4b804 + Meter Atom = 0x9805 + Method Atom = 0x2a806 + Mglyph Atom = 0x30806 + Mi Atom = 0x34702 + Min Atom = 0x34703 + Minlength Atom = 0x34709 + Mn Atom = 0x2b102 + Mo Atom = 0xa402 + Ms Atom = 0x67402 + Mtext Atom = 0x35105 + Multiple Atom = 0x35f08 + Muted Atom = 0x36705 + Name Atom = 0x9604 + Nav Atom = 0x1303 + Nobr Atom = 0x3704 + Noembed Atom = 0x6c07 + Noframes Atom = 0x8908 + Nomodule Atom = 0xa208 + Nonce Atom = 0x1a605 + Noscript Atom = 0x21608 + Novalidate Atom = 0x2b20a + Object Atom = 0x26806 + Ol Atom = 0x13702 + Onabort Atom = 0x19507 + Onafterprint Atom = 0x2360c + Onautocomplete Atom = 0x2760e + Onautocompleteerror Atom = 0x27613 + Onauxclick Atom = 0x61f0a + Onbeforeprint Atom = 0x69e0d + Onbeforeunload Atom = 0x6e70e + Onblur Atom = 0x56d06 + Oncancel Atom = 0x11908 + Oncanplay Atom = 0x14d09 + Oncanplaythrough Atom = 0x14d10 + Onchange Atom = 0x41b08 + Onclick Atom = 0x2f507 + Onclose Atom = 0x36c07 + Oncontextmenu Atom = 0x37e0d + Oncopy Atom = 0x39106 + Oncuechange Atom = 0x3970b + Oncut Atom = 0x3a205 + Ondblclick Atom = 0x3a70a + Ondrag Atom = 0x3b106 + Ondragend Atom = 0x3b109 + Ondragenter Atom = 0x3ba0b + Ondragexit Atom = 0x3c50a + Ondragleave Atom = 0x3df0b + Ondragover Atom = 0x3ea0a + Ondragstart Atom = 0x3f40b + Ondrop Atom = 0x40306 + Ondurationchange Atom = 0x41310 + Onemptied Atom = 0x40a09 + Onended Atom = 0x42307 + Onerror Atom = 0x42a07 + Onfocus Atom = 0x43107 + Onhashchange Atom = 0x43d0c + Oninput Atom = 0x44907 + Oninvalid Atom = 0x45509 + Onkeydown Atom = 0x45e09 + Onkeypress Atom = 0x46b0a + Onkeyup Atom = 0x48007 + Onlanguagechange Atom = 0x48d10 + Onload Atom = 0x49d06 + Onloadeddata Atom = 0x49d0c + Onloadedmetadata Atom = 0x4b010 + Onloadend Atom = 0x4c609 + Onloadstart Atom = 0x4cf0b + Onmessage Atom = 0x4da09 + Onmessageerror Atom = 0x4da0e + Onmousedown Atom = 0x4e80b + Onmouseenter Atom = 0x4f30c + Onmouseleave Atom = 0x4ff0c + Onmousemove Atom = 0x50b0b + Onmouseout Atom = 0x5160a + Onmouseover Atom = 0x5230b + Onmouseup Atom = 0x52e09 + Onmousewheel Atom = 0x53c0c + Onoffline Atom = 0x54809 + Ononline Atom = 0x55108 + Onpagehide Atom = 0x5590a + Onpageshow Atom = 0x5730a + Onpaste Atom = 0x57f07 + Onpause Atom = 0x59a07 + Onplay Atom = 0x5a406 + Onplaying Atom = 0x5a409 + Onpopstate Atom = 0x5ad0a + Onprogress Atom = 0x5b70a + Onratechange Atom = 0x5cc0c + Onrejectionhandled Atom = 0x5d812 + Onreset Atom = 0x5ea07 + Onresize Atom = 0x5f108 + Onscroll Atom = 0x60008 + Onsecuritypolicyviolation Atom = 0x60819 + Onseeked Atom = 0x62908 + Onseeking Atom = 0x63109 + Onselect Atom = 0x63a08 + Onshow Atom = 0x64406 + Onsort Atom = 0x64f06 + Onstalled Atom = 0x65909 + Onstorage Atom = 0x66209 + Onsubmit Atom = 0x66b08 + Onsuspend Atom = 0x67b09 + Ontimeupdate Atom = 0x400c + Ontoggle Atom = 0x68408 + Onunhandledrejection Atom = 0x68c14 + Onunload Atom = 0x6ab08 + Onvolumechange Atom = 0x6b30e + Onwaiting Atom = 0x6c109 + Onwheel Atom = 0x6ca07 + Open Atom = 0x1a304 + Optgroup Atom = 0x5f08 + Optimum Atom = 0x6d107 + Option Atom = 0x6e306 + Output Atom = 0x51d06 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x6607 + Picture Atom = 0x7b07 + Ping Atom = 0xef04 + Placeholder Atom = 0x1310b + Plaintext Atom = 0x1b209 + 
Playsinline Atom = 0x1400b + Poster Atom = 0x2cf06 + Pre Atom = 0x47003 + Preload Atom = 0x48607 + Progress Atom = 0x5b908 + Prompt Atom = 0x53606 + Public Atom = 0x58606 + Q Atom = 0xcf01 + Radiogroup Atom = 0x30a + Rb Atom = 0x3a02 + Readonly Atom = 0x35708 + Referrerpolicy Atom = 0x3d10e + Rel Atom = 0x48703 + Required Atom = 0x24c08 + Reversed Atom = 0x8008 + Rows Atom = 0x9c04 + Rowspan Atom = 0x9c07 + Rp Atom = 0x23c02 + Rt Atom = 0x19a02 + Rtc Atom = 0x19a03 + Ruby Atom = 0xfb04 + S Atom = 0x2501 + Samp Atom = 0x7804 + Sandbox Atom = 0x12907 + Scope Atom = 0x67505 + Scoped Atom = 0x67506 + Script Atom = 0x21806 + Seamless Atom = 0x37108 + Section Atom = 0x56807 + Select Atom = 0x63c06 + Selected Atom = 0x63c08 + Shape Atom = 0x1e505 + Size Atom = 0x5f504 + Sizes Atom = 0x5f505 + Slot Atom = 0x1ef04 + Small Atom = 0x20605 + Sortable Atom = 0x65108 + Sorted Atom = 0x33706 + Source Atom = 0x37806 + Spacer Atom = 0x43706 + Span Atom = 0x9f04 + Spellcheck Atom = 0x4740a + Src Atom = 0x5c003 + Srcdoc Atom = 0x5c006 + Srclang Atom = 0x5f907 + Srcset Atom = 0x6f906 + Start Atom = 0x3fa05 + Step Atom = 0x58304 + Strike Atom = 0xd206 + Strong Atom = 0x6dd06 + Style Atom = 0x6ff05 + Sub Atom = 0x66d03 + Summary Atom = 0x70407 + Sup Atom = 0x70b03 + Svg Atom = 0x70e03 + System Atom = 0x71106 + Tabindex Atom = 0x4be08 + Table Atom = 0x59505 + Target Atom = 0x2c406 + Tbody Atom = 0x2705 + Td Atom = 0x9202 + Template Atom = 0x71408 + Textarea Atom = 0x35208 + Tfoot Atom = 0xf505 + Th Atom = 0x15602 + Thead Atom = 0x33005 + Time Atom = 0x4204 + Title Atom = 0x11005 + Tr Atom = 0xcc02 + Track Atom = 0x1ba05 + Translate Atom = 0x1f209 + Tt Atom = 0x6802 + Type Atom = 0xd904 + Typemustmatch Atom = 0x2900d + U Atom = 0xb01 + Ul Atom = 0xa702 + Updateviacache Atom = 0x460e + Usemap Atom = 0x59e06 + Value Atom = 0x1505 + Var Atom = 0x16d03 + Video Atom = 0x2f105 + Wbr Atom = 0x57c03 + Width Atom = 0x64905 + Workertype Atom = 0x71c0a + Wrap Atom = 0x72604 + Xmp Atom = 0x12f03 +) + +const hash0 = 0x81cdf10e + +const maxAtomLen = 25 + +var table = [1 << 9]Atom{ + 0x1: 0xe60a, // mediagroup + 0x2: 0x2e404, // lang + 0x4: 0x2c09, // accesskey + 0x5: 0x8b08, // frameset + 0x7: 0x63a08, // onselect + 0x8: 0x71106, // system + 0xa: 0x64905, // width + 0xc: 0x2890b, // formenctype + 0xd: 0x13702, // ol + 0xe: 0x3970b, // oncuechange + 0x10: 0x14b03, // bdo + 0x11: 0x11505, // audio + 0x12: 0x17a09, // draggable + 0x14: 0x2f105, // video + 0x15: 0x2b102, // mn + 0x16: 0x38704, // menu + 0x17: 0x2cf06, // poster + 0x19: 0xf606, // footer + 0x1a: 0x2a806, // method + 0x1b: 0x2b808, // datetime + 0x1c: 0x19507, // onabort + 0x1d: 0x460e, // updateviacache + 0x1e: 0xff05, // async + 0x1f: 0x49d06, // onload + 0x21: 0x11908, // oncancel + 0x22: 0x62908, // onseeked + 0x23: 0x30205, // image + 0x24: 0x5d812, // onrejectionhandled + 0x26: 0x17404, // link + 0x27: 0x51d06, // output + 0x28: 0x33104, // head + 0x29: 0x4ff0c, // onmouseleave + 0x2a: 0x57f07, // onpaste + 0x2b: 0x5a409, // onplaying + 0x2c: 0x1c407, // colspan + 0x2f: 0x1bf05, // color + 0x30: 0x5f504, // size + 0x31: 0x2e80a, // http-equiv + 0x33: 0x601, // i + 0x34: 0x5590a, // onpagehide + 0x35: 0x68c14, // onunhandledrejection + 0x37: 0x42a07, // onerror + 0x3a: 0x3b08, // basefont + 0x3f: 0x1303, // nav + 0x40: 0x17704, // kind + 0x41: 0x35708, // readonly + 0x42: 0x30806, // mglyph + 0x44: 0xb202, // li + 0x46: 0x2d506, // hidden + 0x47: 0x70e03, // svg + 0x48: 0x58304, // step + 0x49: 0x23f09, // integrity + 0x4a: 0x58606, // public + 0x4c: 0x1ab03, 
// col + 0x4d: 0x1870a, // blockquote + 0x4e: 0x34f02, // h5 + 0x50: 0x5b908, // progress + 0x51: 0x5f505, // sizes + 0x52: 0x34502, // h4 + 0x56: 0x33005, // thead + 0x57: 0xd607, // keytype + 0x58: 0x5b70a, // onprogress + 0x59: 0x44b09, // inputmode + 0x5a: 0x3b109, // ondragend + 0x5d: 0x3a205, // oncut + 0x5e: 0x43706, // spacer + 0x5f: 0x1ab08, // colgroup + 0x62: 0x16502, // is + 0x65: 0x3c02, // as + 0x66: 0x54809, // onoffline + 0x67: 0x33706, // sorted + 0x69: 0x48d10, // onlanguagechange + 0x6c: 0x43d0c, // onhashchange + 0x6d: 0x9604, // name + 0x6e: 0xf505, // tfoot + 0x6f: 0x56104, // desc + 0x70: 0x33d03, // max + 0x72: 0x1ea06, // coords + 0x73: 0x30d02, // h3 + 0x74: 0x6e70e, // onbeforeunload + 0x75: 0x9c04, // rows + 0x76: 0x63c06, // select + 0x77: 0x9805, // meter + 0x78: 0x38b06, // itemid + 0x79: 0x53c0c, // onmousewheel + 0x7a: 0x5c006, // srcdoc + 0x7d: 0x1ba05, // track + 0x7f: 0x31f08, // itemtype + 0x82: 0xa402, // mo + 0x83: 0x41b08, // onchange + 0x84: 0x33107, // headers + 0x85: 0x5cc0c, // onratechange + 0x86: 0x60819, // onsecuritypolicyviolation + 0x88: 0x4a508, // datalist + 0x89: 0x4e80b, // onmousedown + 0x8a: 0x1ef04, // slot + 0x8b: 0x4b010, // onloadedmetadata + 0x8c: 0x1a06, // accept + 0x8d: 0x26806, // object + 0x91: 0x6b30e, // onvolumechange + 0x92: 0x2107, // charset + 0x93: 0x27613, // onautocompleteerror + 0x94: 0xc113, // allowpaymentrequest + 0x95: 0x2804, // body + 0x96: 0x10a07, // default + 0x97: 0x63c08, // selected + 0x98: 0x21e04, // face + 0x99: 0x1e505, // shape + 0x9b: 0x68408, // ontoggle + 0x9e: 0x64b02, // dt + 0x9f: 0xb604, // mark + 0xa1: 0xb01, // u + 0xa4: 0x6ab08, // onunload + 0xa5: 0x5d04, // loop + 0xa6: 0x16408, // disabled + 0xaa: 0x42307, // onended + 0xab: 0xb00a, // malignmark + 0xad: 0x67b09, // onsuspend + 0xae: 0x35105, // mtext + 0xaf: 0x64f06, // onsort + 0xb0: 0x19d08, // itemprop + 0xb3: 0x67109, // itemscope + 0xb4: 0x17305, // blink + 0xb6: 0x3b106, // ondrag + 0xb7: 0xa702, // ul + 0xb8: 0x26e04, // form + 0xb9: 0x12907, // sandbox + 0xba: 0x8b05, // frame + 0xbb: 0x1505, // value + 0xbc: 0x66209, // onstorage + 0xbf: 0xaa07, // acronym + 0xc0: 0x19a02, // rt + 0xc2: 0x202, // br + 0xc3: 0x22608, // fieldset + 0xc4: 0x2900d, // typemustmatch + 0xc5: 0xa208, // nomodule + 0xc6: 0x6c07, // noembed + 0xc7: 0x69e0d, // onbeforeprint + 0xc8: 0x19106, // button + 0xc9: 0x2f507, // onclick + 0xca: 0x70407, // summary + 0xcd: 0xfb04, // ruby + 0xce: 0x56405, // class + 0xcf: 0x3f40b, // ondragstart + 0xd0: 0x23107, // caption + 0xd4: 0xdd0e, // allowusermedia + 0xd5: 0x4cf0b, // onloadstart + 0xd9: 0x16b03, // div + 0xda: 0x4a904, // list + 0xdb: 0x32e04, // math + 0xdc: 0x44b05, // input + 0xdf: 0x3ea0a, // ondragover + 0xe0: 0x2de02, // h2 + 0xe2: 0x1b209, // plaintext + 0xe4: 0x4f30c, // onmouseenter + 0xe7: 0x47907, // checked + 0xe8: 0x47003, // pre + 0xea: 0x35f08, // multiple + 0xeb: 0xba03, // bdi + 0xec: 0x33d09, // maxlength + 0xed: 0xcf01, // q + 0xee: 0x61f0a, // onauxclick + 0xf0: 0x57c03, // wbr + 0xf2: 0x3b04, // base + 0xf3: 0x6e306, // option + 0xf5: 0x41310, // ondurationchange + 0xf7: 0x8908, // noframes + 0xf9: 0x40508, // dropzone + 0xfb: 0x67505, // scope + 0xfc: 0x8008, // reversed + 0xfd: 0x3ba0b, // ondragenter + 0xfe: 0x3fa05, // start + 0xff: 0x12f03, // xmp + 0x100: 0x5f907, // srclang + 0x101: 0x30703, // img + 0x104: 0x101, // b + 0x105: 0x25403, // for + 0x106: 0x10705, // aside + 0x107: 0x44907, // oninput + 0x108: 0x35604, // area + 0x109: 0x2a40a, // formmethod + 0x10a: 0x72604, 
// wrap + 0x10c: 0x23c02, // rp + 0x10d: 0x46b0a, // onkeypress + 0x10e: 0x6802, // tt + 0x110: 0x34702, // mi + 0x111: 0x36705, // muted + 0x112: 0xf303, // alt + 0x113: 0x5c504, // code + 0x114: 0x6e02, // em + 0x115: 0x3c50a, // ondragexit + 0x117: 0x9f04, // span + 0x119: 0x6d708, // manifest + 0x11a: 0x38708, // menuitem + 0x11b: 0x58b07, // content + 0x11d: 0x6c109, // onwaiting + 0x11f: 0x4c609, // onloadend + 0x121: 0x37e0d, // oncontextmenu + 0x123: 0x56d06, // onblur + 0x124: 0x3fc07, // article + 0x125: 0x9303, // dir + 0x126: 0xef04, // ping + 0x127: 0x24c08, // required + 0x128: 0x45509, // oninvalid + 0x129: 0xb105, // align + 0x12b: 0x58a04, // icon + 0x12c: 0x64d02, // h6 + 0x12d: 0x1c404, // cols + 0x12e: 0x22e0a, // figcaption + 0x12f: 0x45e09, // onkeydown + 0x130: 0x66b08, // onsubmit + 0x131: 0x14d09, // oncanplay + 0x132: 0x70b03, // sup + 0x133: 0xc01, // p + 0x135: 0x40a09, // onemptied + 0x136: 0x39106, // oncopy + 0x137: 0x19c04, // cite + 0x138: 0x3a70a, // ondblclick + 0x13a: 0x50b0b, // onmousemove + 0x13c: 0x66d03, // sub + 0x13d: 0x48703, // rel + 0x13e: 0x5f08, // optgroup + 0x142: 0x9c07, // rowspan + 0x143: 0x37806, // source + 0x144: 0x21608, // noscript + 0x145: 0x1a304, // open + 0x146: 0x20403, // ins + 0x147: 0x2540d, // foreignObject + 0x148: 0x5ad0a, // onpopstate + 0x14a: 0x28d07, // enctype + 0x14b: 0x2760e, // onautocomplete + 0x14c: 0x35208, // textarea + 0x14e: 0x2780c, // autocomplete + 0x14f: 0x15702, // hr + 0x150: 0x1de08, // controls + 0x151: 0x10902, // id + 0x153: 0x2360c, // onafterprint + 0x155: 0x2610d, // foreignobject + 0x156: 0x32707, // marquee + 0x157: 0x59a07, // onpause + 0x158: 0x5e602, // dl + 0x159: 0x5206, // height + 0x15a: 0x34703, // min + 0x15b: 0x9307, // dirname + 0x15c: 0x1f209, // translate + 0x15d: 0x5604, // html + 0x15e: 0x34709, // minlength + 0x15f: 0x48607, // preload + 0x160: 0x71408, // template + 0x161: 0x3df0b, // ondragleave + 0x162: 0x3a02, // rb + 0x164: 0x5c003, // src + 0x165: 0x6dd06, // strong + 0x167: 0x7804, // samp + 0x168: 0x6f307, // address + 0x169: 0x55108, // ononline + 0x16b: 0x1310b, // placeholder + 0x16c: 0x2c406, // target + 0x16d: 0x20605, // small + 0x16e: 0x6ca07, // onwheel + 0x16f: 0x1c90a, // annotation + 0x170: 0x4740a, // spellcheck + 0x171: 0x7207, // details + 0x172: 0x10306, // canvas + 0x173: 0x12109, // autofocus + 0x174: 0xc05, // param + 0x176: 0x46308, // download + 0x177: 0x45203, // del + 0x178: 0x36c07, // onclose + 0x179: 0xb903, // kbd + 0x17a: 0x31906, // applet + 0x17b: 0x2e004, // href + 0x17c: 0x5f108, // onresize + 0x17e: 0x49d0c, // onloadeddata + 0x180: 0xcc02, // tr + 0x181: 0x2c00a, // formtarget + 0x182: 0x11005, // title + 0x183: 0x6ff05, // style + 0x184: 0xd206, // strike + 0x185: 0x59e06, // usemap + 0x186: 0x2fc06, // iframe + 0x187: 0x1004, // main + 0x189: 0x7b07, // picture + 0x18c: 0x31605, // ismap + 0x18e: 0x4a504, // data + 0x18f: 0x5905, // label + 0x191: 0x3d10e, // referrerpolicy + 0x192: 0x15602, // th + 0x194: 0x53606, // prompt + 0x195: 0x56807, // section + 0x197: 0x6d107, // optimum + 0x198: 0x2db04, // high + 0x199: 0x15c02, // h1 + 0x19a: 0x65909, // onstalled + 0x19b: 0x16d03, // var + 0x19c: 0x4204, // time + 0x19e: 0x67402, // ms + 0x19f: 0x33106, // header + 0x1a0: 0x4da09, // onmessage + 0x1a1: 0x1a605, // nonce + 0x1a2: 0x26e0a, // formaction + 0x1a3: 0x22006, // center + 0x1a4: 0x3704, // nobr + 0x1a5: 0x59505, // table + 0x1a6: 0x4a907, // listing + 0x1a7: 0x18106, // legend + 0x1a9: 0x29b09, // challenge + 0x1aa: 0x24806, // 
figure + 0x1ab: 0xe605, // media + 0x1ae: 0xd904, // type + 0x1af: 0x3f04, // font + 0x1b0: 0x4da0e, // onmessageerror + 0x1b1: 0x37108, // seamless + 0x1b2: 0x8703, // dfn + 0x1b3: 0x5c705, // defer + 0x1b4: 0xc303, // low + 0x1b5: 0x19a03, // rtc + 0x1b6: 0x5230b, // onmouseover + 0x1b7: 0x2b20a, // novalidate + 0x1b8: 0x71c0a, // workertype + 0x1ba: 0x3cd07, // itemref + 0x1bd: 0x1, // a + 0x1be: 0x31803, // map + 0x1bf: 0x400c, // ontimeupdate + 0x1c0: 0x15e07, // bgsound + 0x1c1: 0x3206, // keygen + 0x1c2: 0x2705, // tbody + 0x1c5: 0x64406, // onshow + 0x1c7: 0x2501, // s + 0x1c8: 0x6607, // pattern + 0x1cc: 0x14d10, // oncanplaythrough + 0x1ce: 0x2d702, // dd + 0x1cf: 0x6f906, // srcset + 0x1d0: 0x17003, // big + 0x1d2: 0x65108, // sortable + 0x1d3: 0x48007, // onkeyup + 0x1d5: 0x5a406, // onplay + 0x1d7: 0x4b804, // meta + 0x1d8: 0x40306, // ondrop + 0x1da: 0x60008, // onscroll + 0x1db: 0x1fb0b, // crossorigin + 0x1dc: 0x5730a, // onpageshow + 0x1dd: 0x4, // abbr + 0x1de: 0x9202, // td + 0x1df: 0x58b0f, // contenteditable + 0x1e0: 0x27206, // action + 0x1e1: 0x1400b, // playsinline + 0x1e2: 0x43107, // onfocus + 0x1e3: 0x2e008, // hreflang + 0x1e5: 0x5160a, // onmouseout + 0x1e6: 0x5ea07, // onreset + 0x1e7: 0x13c08, // autoplay + 0x1e8: 0x63109, // onseeking + 0x1ea: 0x67506, // scoped + 0x1ec: 0x30a, // radiogroup + 0x1ee: 0x3800b, // contextmenu + 0x1ef: 0x52e09, // onmouseup + 0x1f1: 0x2ca06, // hgroup + 0x1f2: 0x2080f, // allowfullscreen + 0x1f3: 0x4be08, // tabindex + 0x1f6: 0x30f07, // isindex + 0x1f7: 0x1a0e, // accept-charset + 0x1f8: 0x2ae0e, // formnovalidate + 0x1fb: 0x1c90e, // annotation-xml + 0x1fc: 0x6e05, // embed + 0x1fd: 0x21806, // script + 0x1fe: 0xbb06, // dialog + 0x1ff: 0x1d707, // command +} + +const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + + "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + + "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + + "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + + "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + + "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + + "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + + "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + + "ignObjectforeignobjectformactionautocompleteerrorformenctype" + + "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + + "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + + "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + + "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + + "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + + "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + + "articleondropzonemptiedondurationchangeonendedonerroronfocus" + + "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + + "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + + "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + + "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + + "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + + "classectionbluronpageshowbronpastepublicontenteditableonpaus" + + "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + + "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + + 
"violationauxclickonseekedonseekingonselectedonshowidth6onsor" + + "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + + "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + + "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + + "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go new file mode 100644 index 000000000..13bed1599 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/charset.go @@ -0,0 +1,257 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package charset provides common text encodings for HTML documents. +// +// The mapping from encoding labels to encodings is defined at +// https://encoding.spec.whatwg.org/. +package charset // import "golang.org/x/net/html/charset" + +import ( + "bytes" + "fmt" + "io" + "mime" + "strings" + "unicode/utf8" + + "golang.org/x/net/html" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/htmlindex" + "golang.org/x/text/transform" +) + +// Lookup returns the encoding with the specified label, and its canonical +// name. It returns nil and the empty string if label is not one of the +// standard encodings for HTML. Matching is case-insensitive and ignores +// leading and trailing whitespace. Encoders will use HTML escape sequences for +// runes that are not supported by the character set. +func Lookup(label string) (e encoding.Encoding, name string) { + e, err := htmlindex.Get(label) + if err != nil { + return nil, "" + } + name, _ = htmlindex.Name(e) + return &htmlEncoding{e}, name +} + +type htmlEncoding struct{ encoding.Encoding } + +func (h *htmlEncoding) NewEncoder() *encoding.Encoder { + // HTML requires a non-terminating legacy encoder. We use HTML escapes to + // substitute unsupported code points. + return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder()) +} + +// DetermineEncoding determines the encoding of an HTML document by examining +// up to the first 1024 bytes of content and the declared Content-Type. +// +// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding +func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) { + if len(content) > 1024 { + content = content[:1024] + } + + for _, b := range boms { + if bytes.HasPrefix(content, b.bom) { + e, name = Lookup(b.enc) + return e, name, true + } + } + + if _, params, err := mime.ParseMediaType(contentType); err == nil { + if cs, ok := params["charset"]; ok { + if e, name = Lookup(cs); e != nil { + return e, name, true + } + } + } + + if len(content) > 0 { + e, name = prescan(content) + if e != nil { + return e, name, false + } + } + + // Try to detect UTF-8. + // First eliminate any partial rune at the end. + for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- { + b := content[i] + if b < 0x80 { + break + } + if utf8.RuneStart(b) { + content = content[:i] + break + } + } + hasHighBit := false + for _, c := range content { + if c >= 0x80 { + hasHighBit = true + break + } + } + if hasHighBit && utf8.Valid(content) { + return encoding.Nop, "utf-8", false + } + + // TODO: change default depending on user's locale? + return charmap.Windows1252, "windows-1252", false +} + +// NewReader returns an io.Reader that converts the content of r to UTF-8. 
+// It calls DetermineEncoding to find out what r's encoding is. +func NewReader(r io.Reader, contentType string) (io.Reader, error) { + preview := make([]byte, 1024) + n, err := io.ReadFull(r, preview) + switch { + case err == io.ErrUnexpectedEOF: + preview = preview[:n] + r = bytes.NewReader(preview) + case err != nil: + return nil, err + default: + r = io.MultiReader(bytes.NewReader(preview), r) + } + + if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop { + r = transform.NewReader(r, e.NewDecoder()) + } + return r, nil +} + +// NewReaderLabel returns a reader that converts from the specified charset to +// UTF-8. It uses Lookup to find the encoding that corresponds to label, and +// returns an error if Lookup returns nil. It is suitable for use as +// encoding/xml.Decoder's CharsetReader function. +func NewReaderLabel(label string, input io.Reader) (io.Reader, error) { + e, _ := Lookup(label) + if e == nil { + return nil, fmt.Errorf("unsupported charset: %q", label) + } + return transform.NewReader(input, e.NewDecoder()), nil +} + +func prescan(content []byte) (e encoding.Encoding, name string) { + z := html.NewTokenizer(bytes.NewReader(content)) + for { + switch z.Next() { + case html.ErrorToken: + return nil, "" + + case html.StartTagToken, html.SelfClosingTagToken: + tagName, hasAttr := z.TagName() + if !bytes.Equal(tagName, []byte("meta")) { + continue + } + attrList := make(map[string]bool) + gotPragma := false + + const ( + dontKnow = iota + doNeedPragma + doNotNeedPragma + ) + needPragma := dontKnow + + name = "" + e = nil + for hasAttr { + var key, val []byte + key, val, hasAttr = z.TagAttr() + ks := string(key) + if attrList[ks] { + continue + } + attrList[ks] = true + for i, c := range val { + if 'A' <= c && c <= 'Z' { + val[i] = c + 0x20 + } + } + + switch ks { + case "http-equiv": + if bytes.Equal(val, []byte("content-type")) { + gotPragma = true + } + + case "content": + if e == nil { + name = fromMetaElement(string(val)) + if name != "" { + e, name = Lookup(name) + if e != nil { + needPragma = doNeedPragma + } + } + } + + case "charset": + e, name = Lookup(string(val)) + needPragma = doNotNeedPragma + } + } + + if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma { + continue + } + + if strings.HasPrefix(name, "utf-16") { + name = "utf-8" + e = encoding.Nop + } + + if e != nil { + return e, name + } + } + } +} + +func fromMetaElement(s string) string { + for s != "" { + csLoc := strings.Index(s, "charset") + if csLoc == -1 { + return "" + } + s = s[csLoc+len("charset"):] + s = strings.TrimLeft(s, " \t\n\f\r") + if !strings.HasPrefix(s, "=") { + continue + } + s = s[1:] + s = strings.TrimLeft(s, " \t\n\f\r") + if s == "" { + return "" + } + if q := s[0]; q == '"' || q == '\'' { + s = s[1:] + closeQuote := strings.IndexRune(s, rune(q)) + if closeQuote == -1 { + return "" + } + return s[:closeQuote] + } + + end := strings.IndexAny(s, "; \t\n\f\r") + if end == -1 { + end = len(s) + } + return s[:end] + } + return "" +} + +var boms = []struct { + bom []byte + enc string +}{ + {[]byte{0xfe, 0xff}, "utf-16be"}, + {[]byte{0xff, 0xfe}, "utf-16le"}, + {[]byte{0xef, 0xbb, 0xbf}, "utf-8"}, +} diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go new file mode 100644 index 000000000..a3a918f0b --- /dev/null +++ b/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,112 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.4.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility. + "keygen": true, + "li": true, + "link": true, + "listing": true, + "main": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "math": + switch element.Data { + case "mi", "mo", "mn", "ms", "mtext", "annotation-xml": + return true + } + case "svg": + switch element.Data { + case "foreignObject", "desc", "title": + return true + } + } + return false +} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 000000000..822ed42a0 --- /dev/null +++ b/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. + + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. 
In EBNF notation, the valid call sequence per token is: + + Next {Raw} [ Token | Text | TagName {TagAttr} ] + +Token returns an independent data structure that completely describes a token. +Entities (such as "<") are unescaped, tag names and attribute keys are +lower-cased, and attributes are collected into a []Attribute. For example: + + for { + if z.Next() == html.ErrorToken { + // Returning io.EOF indicates success. + return z.Err() + } + emitToken(z.Token()) + } + +The low-level API performs fewer allocations and copies, but the contents of +the []byte values returned by Text, TagName and TagAttr may change on the next +call to Next. For example, to extract an HTML page's anchor text: + + depth := 0 + for { + tt := z.Next() + switch tt { + case html.ErrorToken: + return z.Err() + case html.TextToken: + if depth > 0 { + // emitBytes should copy the []byte it receives, + // if it doesn't process it immediately. + emitBytes(z.Text()) + } + case html.StartTagToken, html.EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == html.StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + +Parsing is done by calling Parse with an io.Reader, which returns the root of +the parse tree (the document element) as a *Node. It is the caller's +responsibility to ensure that the Reader provides UTF-8 encoded HTML. For +example, to process each anchor node in depth-first order: + + doc, err := html.Parse(r) + if err != nil { + // ... + } + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + // Do something with n... + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + +The relevant specifications include: +https://html.spec.whatwg.org/multipage/syntax.html and +https://html.spec.whatwg.org/multipage/syntax.html#tokenization +*/ +package html // import "golang.org/x/net/html" + +// The tokenization algorithm implemented by this package is not a line-by-line +// transliteration of the relatively verbose state-machine in the WHATWG +// specification. A more direct approach is used instead, where the program +// counter implies the state, such as whether it is tokenizing a tag or a text +// node. Specification compliance is verified by checking expected and actual +// outputs over a test suite rather than aiming for algorithmic fidelity. + +// TODO(nigeltao): Does a DOM API belong in this package or a separate one? +// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go new file mode 100644 index 000000000..c484e5a94 --- /dev/null +++ b/vendor/golang.org/x/net/html/doctype.go @@ -0,0 +1,156 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +// parseDoctype parses the data from a DoctypeToken into a name, +// public identifier, and system identifier. It returns a Node whose Type +// is DoctypeNode, whose Data is the name, and which has attributes +// named "system" and "public" for the two identifiers if they were present. +// quirks is whether the document should be parsed in "quirks mode". +func parseDoctype(s string) (n *Node, quirks bool) { + n = &Node{Type: DoctypeNode} + + // Find the name. 
+ space := strings.IndexAny(s, whitespace) + if space == -1 { + space = len(s) + } + n.Data = s[:space] + // The comparison to "html" is case-sensitive. + if n.Data != "html" { + quirks = true + } + n.Data = strings.ToLower(n.Data) + s = strings.TrimLeft(s[space:], whitespace) + + if len(s) < 6 { + // It can't start with "PUBLIC" or "SYSTEM". + // Ignore the rest of the string. + return n, quirks || s != "" + } + + key := strings.ToLower(s[:6]) + s = s[6:] + for key == "public" || key == "system" { + s = strings.TrimLeft(s, whitespace) + if s == "" { + break + } + quote := s[0] + if quote != '"' && quote != '\'' { + break + } + s = s[1:] + q := strings.IndexRune(s, rune(quote)) + var id string + if q == -1 { + id = s + s = "" + } else { + id = s[:q] + s = s[q+1:] + } + n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) + if key == "public" { + key = "system" + } else { + key = "" + } + } + + if key != "" || s != "" { + quirks = true + } else if len(n.Attr) > 0 { + if n.Attr[0].Key == "public" { + public := strings.ToLower(n.Attr[0].Val) + switch public { + case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": + quirks = true + default: + for _, q := range quirkyIDs { + if strings.HasPrefix(public, q) { + quirks = true + break + } + } + } + // The following two public IDs only cause quirks mode if there is no system ID. + if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || + strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { + quirks = true + } + } + if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && + strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + quirks = true + } + } + + return n, quirks +} + +// quirkyIDs is a list of public doctype identifiers that cause a document +// to be interpreted in quirks mode. The identifiers should be in lower case. +var quirkyIDs = []string{ + "+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. 
corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 000000000..b628880a0 --- /dev/null +++ b/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. +// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": '\U00002102', + 
"Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + "DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', 
+ "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', + "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": 
'\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + "Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": 
'\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": '\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": 
'\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + "angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', 
+ "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": '\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": 
'\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + "diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + 
"emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": '\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + 
"harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + "isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + 
"ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": '\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + 
"mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + "nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": 
'\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + "piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', 
+ "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": '\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": 
'\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": '\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": 
'\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + "upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + 
"xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + "REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', 
+ "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. + // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', '\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + "vnsub;": 
{'\u2282', '\u20D2'}, + "vnsup;": {'\u2283', '\u20D2'}, + "vsubnE;": {'\u2ACB', '\uFE00'}, + "vsubne;": {'\u228A', '\uFE00'}, + "vsupnE;": {'\u2ACC', '\uFE00'}, + "vsupne;": {'\u228B', '\uFE00'}, +} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go new file mode 100644 index 000000000..d85613962 --- /dev/null +++ b/vendor/golang.org/x/net/html/escape.go @@ -0,0 +1,258 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// These replacements permit compatibility with old numeric entities that +// assumed Windows-1252 encoding. +// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference +var replacementTable = [...]rune{ + '\u20AC', // First entry is what 0x80 should be replaced with. + '\u0081', + '\u201A', + '\u0192', + '\u201E', + '\u2026', + '\u2020', + '\u2021', + '\u02C6', + '\u2030', + '\u0160', + '\u2039', + '\u0152', + '\u008D', + '\u017D', + '\u008F', + '\u0090', + '\u2018', + '\u2019', + '\u201C', + '\u201D', + '\u2022', + '\u2013', + '\u2014', + '\u02DC', + '\u2122', + '\u0161', + '\u203A', + '\u0153', + '\u009D', + '\u017E', + '\u0178', // Last entry is 0x9F. + // 0x00->'\uFFFD' is handled programmatically. + // 0x0D->'\u000D' is a no-op. +} + +// unescapeEntity reads an entity like "<" from b[src:] and writes the +// corresponding "<" to b[dst:], returning the incremented dst and src cursors. +// Precondition: b[src] == '&' && dst <= src. +// attribute should be true if parsing an attribute value. +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { + // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference + + // i starts at 1 because we already know that s[0] == '&'. + i, s := 1, b[src:] + + if len(s) <= 1 { + b[dst] = b[src] + return dst + 1, src + 1 + } + + if s[i] == '#' { + if len(s) <= 3 { // We need to have at least "&#.". + b[dst] = b[src] + return dst + 1, src + 1 + } + i++ + c := s[i] + hex := false + if c == 'x' || c == 'X' { + hex = true + i++ + } + + x := '\x00' + for i < len(s) { + c = s[i] + i++ + if hex { + if '0' <= c && c <= '9' { + x = 16*x + rune(c) - '0' + continue + } else if 'a' <= c && c <= 'f' { + x = 16*x + rune(c) - 'a' + 10 + continue + } else if 'A' <= c && c <= 'F' { + x = 16*x + rune(c) - 'A' + 10 + continue + } + } else if '0' <= c && c <= '9' { + x = 10*x + rune(c) - '0' + continue + } + if c != ';' { + i-- + } + break + } + + if i <= 3 { // No characters matched. + b[dst] = b[src] + return dst + 1, src + 1 + } + + if 0x80 <= x && x <= 0x9F { + // Replace characters from Windows-1252 with UTF-8 equivalents. + x = replacementTable[x-0x80] + } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { + // Replace invalid characters with the replacement character. + x = '\uFFFD' + } + + return dst + utf8.EncodeRune(b[dst:], x), src + i + } + + // Consume the maximum number of characters possible, with the + // consumed characters matching one of the named references. + + for i < len(s) { + c := s[i] + i++ + // Lower-cased characters are more common in entities, so we check for them first. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + continue + } + if c != ';' { + i-- + } + break + } + + entityName := string(s[1:i]) + if entityName == "" { + // No-op. 
+ } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { + // No-op. + } else if x := entity[entityName]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + i + } else if x := entity2[entityName]; x[0] != 0 { + dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) + return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i + } else if !attribute { + maxLen := len(entityName) - 1 + if maxLen > longestEntityWithoutSemicolon { + maxLen = longestEntityWithoutSemicolon + } + for j := maxLen; j > 1; j-- { + if x := entity[entityName[:j]]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 + } + } + } + + dst1, src1 = dst+i, src+i + copy(b[dst:dst1], b[src:src1]) + return dst1, src1 +} + +// unescape unescapes b's entities in-place, so that "a<b" becomes "a': + esc = ">" + case '"': + // """ is shorter than """. + esc = """ + case '\r': + esc = " " + default: + panic("unrecognized escape character") + } + s = s[i+1:] + if _, err := w.WriteString(esc); err != nil { + return err + } + i = strings.IndexAny(s, escapedChars) + } + _, err := w.WriteString(s) + return err +} + +// EscapeString escapes special characters like "<" to become "<". It +// escapes only five such characters: <, >, &, ' and ". +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't +// always true. +func EscapeString(s string) string { + if strings.IndexAny(s, escapedChars) == -1 { + return s + } + var buf bytes.Buffer + escape(&buf, s) + return buf.String() +} + +// UnescapeString unescapes entities like "<" to become "<". It unescapes a +// larger range of entities than EscapeString escapes. For example, "á" +// unescapes to "á", as does "á" and "&xE1;". +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't +// always true. +func UnescapeString(s string) string { + for _, c := range s { + if c == '&' { + return string(unescape([]byte(s), false)) + } + } + return s +} diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go new file mode 100644 index 000000000..01477a963 --- /dev/null +++ b/vendor/golang.org/x/net/html/foreign.go @@ -0,0 +1,226 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package html + +import ( + "strings" +) + +func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { + for i := range aa { + if newName, ok := nameMap[aa[i].Key]; ok { + aa[i].Key = newName + } + } +} + +func adjustForeignAttributes(aa []Attribute) { + for i, a := range aa { + if a.Key == "" || a.Key[0] != 'x' { + continue + } + switch a.Key { + case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", + "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": + j := strings.Index(a.Key, ":") + aa[i].Namespace = a.Key[:j] + aa[i].Key = a.Key[j+1:] + } + } +} + +func htmlIntegrationPoint(n *Node) bool { + if n.Type != ElementNode { + return false + } + switch n.Namespace { + case "math": + if n.Data == "annotation-xml" { + for _, a := range n.Attr { + if a.Key == "encoding" { + val := strings.ToLower(a.Val) + if val == "text/html" || val == "application/xhtml+xml" { + return true + } + } + } + } + case "svg": + switch n.Data { + case "desc", "foreignObject", "title": + return true + } + } + return false +} + +func mathMLTextIntegrationPoint(n *Node) bool { + if n.Namespace != "math" { + return false + } + switch n.Data { + case "mi", "mo", "mn", "ms", "mtext": + return true + } + return false +} + +// Section 12.2.6.5. +var breakout = map[string]bool{ + "b": true, + "big": true, + "blockquote": true, + "body": true, + "br": true, + "center": true, + "code": true, + "dd": true, + "div": true, + "dl": true, + "dt": true, + "em": true, + "embed": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "hr": true, + "i": true, + "img": true, + "li": true, + "listing": true, + "menu": true, + "meta": true, + "nobr": true, + "ol": true, + "p": true, + "pre": true, + "ruby": true, + "s": true, + "small": true, + "span": true, + "strong": true, + "strike": true, + "sub": true, + "sup": true, + "table": true, + "tt": true, + "u": true, + "ul": true, + "var": true, +} + +// Section 12.2.6.5. 
+var svgTagNameAdjustments = map[string]string{ + "altglyph": "altGlyph", + "altglyphdef": "altGlyphDef", + "altglyphitem": "altGlyphItem", + "animatecolor": "animateColor", + "animatemotion": "animateMotion", + "animatetransform": "animateTransform", + "clippath": "clipPath", + "feblend": "feBlend", + "fecolormatrix": "feColorMatrix", + "fecomponenttransfer": "feComponentTransfer", + "fecomposite": "feComposite", + "feconvolvematrix": "feConvolveMatrix", + "fediffuselighting": "feDiffuseLighting", + "fedisplacementmap": "feDisplacementMap", + "fedistantlight": "feDistantLight", + "feflood": "feFlood", + "fefunca": "feFuncA", + "fefuncb": "feFuncB", + "fefuncg": "feFuncG", + "fefuncr": "feFuncR", + "fegaussianblur": "feGaussianBlur", + "feimage": "feImage", + "femerge": "feMerge", + "femergenode": "feMergeNode", + "femorphology": "feMorphology", + "feoffset": "feOffset", + "fepointlight": "fePointLight", + "fespecularlighting": "feSpecularLighting", + "fespotlight": "feSpotLight", + "fetile": "feTile", + "feturbulence": "feTurbulence", + "foreignobject": "foreignObject", + "glyphref": "glyphRef", + "lineargradient": "linearGradient", + "radialgradient": "radialGradient", + "textpath": "textPath", +} + +// Section 12.2.6.1 +var mathMLAttributeAdjustments = map[string]string{ + "definitionurl": "definitionURL", +} + +var svgAttributeAdjustments = map[string]string{ + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", +} diff --git 
a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go new file mode 100644 index 000000000..633ee15dc --- /dev/null +++ b/vendor/golang.org/x/net/html/node.go @@ -0,0 +1,220 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "golang.org/x/net/html/atom" +) + +// A NodeType is the type of a Node. +type NodeType uint32 + +const ( + ErrorNode NodeType = iota + TextNode + DocumentNode + ElementNode + CommentNode + DoctypeNode + scopeMarkerNode +) + +// Section 12.2.4.3 says "The markers are inserted when entering applet, +// object, marquee, template, td, th, and caption elements, and are used +// to prevent formatting from "leaking" into applet, object, marquee, +// template, td, th, and caption elements". +var scopeMarker = Node{Type: scopeMarkerNode} + +// A Node consists of a NodeType and some Data (tag name for element nodes, +// content for text) and are part of a tree of Nodes. Element nodes may also +// have a Namespace and contain a slice of Attributes. Data is unescaped, so +// that it looks like "a 0 { + return (*s)[i-1] + } + return nil +} + +// index returns the index of the top-most occurrence of n in the stack, or -1 +// if n is not present. +func (s *nodeStack) index(n *Node) int { + for i := len(*s) - 1; i >= 0; i-- { + if (*s)[i] == n { + return i + } + } + return -1 +} + +// contains returns whether a is within s. +func (s *nodeStack) contains(a atom.Atom) bool { + for _, n := range *s { + if n.DataAtom == a && n.Namespace == "" { + return true + } + } + return false +} + +// insert inserts a node at the given index. +func (s *nodeStack) insert(i int, n *Node) { + (*s) = append(*s, nil) + copy((*s)[i+1:], (*s)[i:]) + (*s)[i] = n +} + +// remove removes a node from the stack. It is a no-op if n is not present. +func (s *nodeStack) remove(n *Node) { + i := s.index(n) + if i == -1 { + return + } + copy((*s)[i:], (*s)[i+1:]) + j := len(*s) - 1 + (*s)[j] = nil + *s = (*s)[:j] +} + +type insertionModeStack []insertionMode + +func (s *insertionModeStack) pop() (im insertionMode) { + i := len(*s) + im = (*s)[i-1] + *s = (*s)[:i-1] + return im +} + +func (s *insertionModeStack) top() insertionMode { + if i := len(*s); i > 0 { + return (*s)[i-1] + } + return nil +} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go new file mode 100644 index 000000000..ca2cb5875 --- /dev/null +++ b/vendor/golang.org/x/net/html/parse.go @@ -0,0 +1,2324 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "errors" + "fmt" + "io" + "strings" + + a "golang.org/x/net/html/atom" +) + +// A parser implements the HTML5 parsing algorithm: +// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction +type parser struct { + // tokenizer provides the tokens for the parser. + tokenizer *Tokenizer + // tok is the most recently read token. + tok Token + // Self-closing tags like
are treated as start tags, except that + // hasSelfClosingToken is set while they are being processed. + hasSelfClosingToken bool + // doc is the document root element. + doc *Node + // The stack of open elements (section 12.2.4.2) and active formatting + // elements (section 12.2.4.3). + oe, afe nodeStack + // Element pointers (section 12.2.4.4). + head, form *Node + // Other parsing state flags (section 12.2.4.5). + scripting, framesetOK bool + // The stack of template insertion modes + templateStack insertionModeStack + // im is the current insertion mode. + im insertionMode + // originalIM is the insertion mode to go back to after completing a text + // or inTableText insertion mode. + originalIM insertionMode + // fosterParenting is whether new elements should be inserted according to + // the foster parenting rules (section 12.2.6.1). + fosterParenting bool + // quirks is whether the parser is operating in "quirks mode." + quirks bool + // fragment is whether the parser is parsing an HTML fragment. + fragment bool + // context is the context element when parsing an HTML fragment + // (section 12.4). + context *Node +} + +func (p *parser) top() *Node { + if n := p.oe.top(); n != nil { + return n + } + return p.doc +} + +// Stop tags for use in popUntil. These come from section 12.2.4.2. +var ( + defaultScopeStopTags = map[string][]a.Atom{ + "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, + "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, + "svg": {a.Desc, a.ForeignObject, a.Title}, + } +) + +type scope int + +const ( + defaultScope scope = iota + listItemScope + buttonScope + tableScope + tableRowScope + tableBodyScope + selectScope +) + +// popUntil pops the stack of open elements at the highest element whose tag +// is in matchTags, provided there is no higher element in the scope's stop +// tags (as defined in section 12.2.4.2). It returns whether or not there was +// such an element. If there was not, popUntil leaves the stack unchanged. +// +// For example, the set of stop tags for table scope is: "html", "table". If +// the stack was: +// ["html", "body", "font", "table", "b", "i", "u"] +// then popUntil(tableScope, "font") would return false, but +// popUntil(tableScope, "i") would return true and the stack would become: +// ["html", "body", "font", "table", "b"] +// +// If an element's tag is in both the stop tags and matchTags, then the stack +// will be popped and the function returns true (provided, of course, there was +// no higher element in the stack that was also in the stop tags). For example, +// popUntil(tableScope, "table") returns true and leaves: +// ["html", "body", "font"] +func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { + if i := p.indexOfElementInScope(s, matchTags...); i != -1 { + p.oe = p.oe[:i] + return true + } + return false +} + +// indexOfElementInScope returns the index in p.oe of the highest element whose +// tag is in matchTags that is in scope. If no matching element is in scope, it +// returns -1. +func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + if p.oe[i].Namespace == "" { + for _, t := range matchTags { + if t == tagAtom { + return i + } + } + switch s { + case defaultScope: + // No-op. 
+ case listItemScope: + if tagAtom == a.Ol || tagAtom == a.Ul { + return -1 + } + case buttonScope: + if tagAtom == a.Button { + return -1 + } + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { + return -1 + } + case selectScope: + if tagAtom != a.Optgroup && tagAtom != a.Option { + return -1 + } + default: + panic("unreachable") + } + } + switch s { + case defaultScope, listItemScope, buttonScope: + for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { + if t == tagAtom { + return -1 + } + } + } + } + return -1 +} + +// elementInScope is like popUntil, except that it doesn't modify the stack of +// open elements. +func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { + return p.indexOfElementInScope(s, matchTags...) != -1 +} + +// clearStackToContext pops elements off the stack of open elements until a +// scope-defined element is found. +func (p *parser) clearStackToContext(s scope) { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + switch s { + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { + p.oe = p.oe[:i+1] + return + } + case tableRowScope: + if tagAtom == a.Html || tagAtom == a.Tr || tagAtom == a.Template { + p.oe = p.oe[:i+1] + return + } + case tableBodyScope: + if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead || tagAtom == a.Template { + p.oe = p.oe[:i+1] + return + } + default: + panic("unreachable") + } + } +} + +// generateImpliedEndTags pops nodes off the stack of open elements as long as +// the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc. +// If exceptions are specified, nodes with that name will not be popped off. +func (p *parser) generateImpliedEndTags(exceptions ...string) { + var i int +loop: + for i = len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if n.Type == ElementNode { + switch n.DataAtom { + case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: + for _, except := range exceptions { + if n.Data == except { + break loop + } + } + continue + } + } + break + } + + p.oe = p.oe[:i+1] +} + +// addChild adds a child node n to the top element, and pushes n onto the stack +// of open elements if it is an element node. +func (p *parser) addChild(n *Node) { + if p.shouldFosterParent() { + p.fosterParent(n) + } else { + p.top().AppendChild(n) + } + + if n.Type == ElementNode { + p.oe = append(p.oe, n) + } +} + +// shouldFosterParent returns whether the next node to be added should be +// foster parented. +func (p *parser) shouldFosterParent() bool { + if p.fosterParenting { + switch p.top().DataAtom { + case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: + return true + } + } + return false +} + +// fosterParent adds a child node according to the foster parenting rules. +// Section 12.2.6.1, "foster parenting". +func (p *parser) fosterParent(n *Node) { + var table, parent, prev, template *Node + var i int + for i = len(p.oe) - 1; i >= 0; i-- { + if p.oe[i].DataAtom == a.Table { + table = p.oe[i] + break + } + } + + var j int + for j = len(p.oe) - 1; j >= 0; j-- { + if p.oe[j].DataAtom == a.Template { + template = p.oe[j] + break + } + } + + if template != nil && (table == nil || j > i) { + template.AppendChild(n) + return + } + + if table == nil { + // The foster parent is the html element. 
+ parent = p.oe[0] + } else { + parent = table.Parent + } + if parent == nil { + parent = p.oe[i-1] + } + + if table != nil { + prev = table.PrevSibling + } else { + prev = parent.LastChild + } + if prev != nil && prev.Type == TextNode && n.Type == TextNode { + prev.Data += n.Data + return + } + + parent.InsertBefore(n, table) +} + +// addText adds text to the preceding node if it is a text node, or else it +// calls addChild with a new text node. +func (p *parser) addText(text string) { + if text == "" { + return + } + + if p.shouldFosterParent() { + p.fosterParent(&Node{ + Type: TextNode, + Data: text, + }) + return + } + + t := p.top() + if n := t.LastChild; n != nil && n.Type == TextNode { + n.Data += text + return + } + p.addChild(&Node{ + Type: TextNode, + Data: text, + }) +} + +// addElement adds a child element based on the current token. +func (p *parser) addElement() { + p.addChild(&Node{ + Type: ElementNode, + DataAtom: p.tok.DataAtom, + Data: p.tok.Data, + Attr: p.tok.Attr, + }) +} + +// Section 12.2.4.3. +func (p *parser) addFormattingElement() { + tagAtom, attr := p.tok.DataAtom, p.tok.Attr + p.addElement() + + // Implement the Noah's Ark clause, but with three per family instead of two. + identicalElements := 0 +findIdenticalElements: + for i := len(p.afe) - 1; i >= 0; i-- { + n := p.afe[i] + if n.Type == scopeMarkerNode { + break + } + if n.Type != ElementNode { + continue + } + if n.Namespace != "" { + continue + } + if n.DataAtom != tagAtom { + continue + } + if len(n.Attr) != len(attr) { + continue + } + compareAttributes: + for _, t0 := range n.Attr { + for _, t1 := range attr { + if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { + // Found a match for this attribute, continue with the next attribute. + continue compareAttributes + } + } + // If we get here, there is no attribute that matches a. + // Therefore the element is not identical to the new one. + continue findIdenticalElements + } + + identicalElements++ + if identicalElements >= 3 { + p.afe.remove(n) + } + } + + p.afe = append(p.afe, p.top()) +} + +// Section 12.2.4.3. +func (p *parser) clearActiveFormattingElements() { + for { + n := p.afe.pop() + if len(p.afe) == 0 || n.Type == scopeMarkerNode { + return + } + } +} + +// Section 12.2.4.3. +func (p *parser) reconstructActiveFormattingElements() { + n := p.afe.top() + if n == nil { + return + } + if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { + return + } + i := len(p.afe) - 1 + for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { + if i == 0 { + i = -1 + break + } + i-- + n = p.afe[i] + } + for { + i++ + clone := p.afe[i].clone() + p.addChild(clone) + p.afe[i] = clone + if i == len(p.afe)-1 { + break + } + } +} + +// Section 12.2.5. +func (p *parser) acknowledgeSelfClosingTag() { + p.hasSelfClosingToken = false +} + +// An insertion mode (section 12.2.4.1) is the state transition function from +// a particular state in the HTML5 parser's state machine. It updates the +// parser's fields depending on parser.tok (where ErrorToken means EOF). +// It returns whether the token was consumed. +type insertionMode func(*parser) bool + +// setOriginalIM sets the insertion mode to return to after completing a text or +// inTableText insertion mode. +// Section 12.2.4.1, "using the rules for". +func (p *parser) setOriginalIM() { + if p.originalIM != nil { + panic("html: bad parser state: originalIM was set twice") + } + p.originalIM = p.im +} + +// Section 12.2.4.1, "reset the insertion mode". 
+func (p *parser) resetInsertionMode() { + for i := len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + last := i == 0 + if last && p.context != nil { + n = p.context + } + + switch n.DataAtom { + case a.Select: + if !last { + for ancestor, first := n, p.oe[0]; ancestor != first; { + ancestor = p.oe[p.oe.index(ancestor)-1] + switch ancestor.DataAtom { + case a.Template: + p.im = inSelectIM + return + case a.Table: + p.im = inSelectInTableIM + return + } + } + } + p.im = inSelectIM + case a.Td, a.Th: + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.im = inCellIM + case a.Tr: + p.im = inRowIM + case a.Tbody, a.Thead, a.Tfoot: + p.im = inTableBodyIM + case a.Caption: + p.im = inCaptionIM + case a.Colgroup: + p.im = inColumnGroupIM + case a.Table: + p.im = inTableIM + case a.Template: + // TODO: remove this divergence from the HTML5 spec. + if n.Namespace != "" { + continue + } + p.im = p.templateStack.top() + case a.Head: + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.im = inHeadIM + case a.Body: + p.im = inBodyIM + case a.Frameset: + p.im = inFramesetIM + case a.Html: + if p.head == nil { + p.im = beforeHeadIM + } else { + p.im = afterHeadIM + } + default: + if last { + p.im = inBodyIM + return + } + continue + } + return + } +} + +const whitespace = " \t\r\n\f" + +// Section 12.2.6.4.1. +func initialIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + n, quirks := parseDoctype(p.tok.Data) + p.doc.AppendChild(n) + p.quirks = quirks + p.im = beforeHTMLIM + return true + } + p.quirks = true + p.im = beforeHTMLIM + return false +} + +// Section 12.2.6.4.2. +func beforeHTMLIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + if p.tok.DataAtom == a.Html { + p.addElement() + p.im = beforeHeadIM + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + } + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false +} + +// Section 12.2.6.4.3. +func beforeHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Head: + p.addElement() + p.head = p.top() + p.im = inHeadIM + return true + case a.Html: + return inBodyIM(p) + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. 
+ return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.6.4.4. +func inHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. + p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: + p.addElement() + p.oe.pop() + p.acknowledgeSelfClosingTag() + return true + case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: + p.addElement() + p.setOriginalIM() + p.im = textIM + return true + case a.Head: + // Ignore the token. + return true + case a.Template: + p.addElement() + p.afe = append(p.afe, &scopeMarker) + p.framesetOK = false + p.im = inTemplateIM + p.templateStack = append(p.templateStack, inTemplateIM) + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head: + p.oe.pop() + p.im = afterHeadIM + return true + case a.Body, a.Html, a.Br: + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false + case a.Template: + if !p.oe.contains(a.Template) { + return true + } + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.generateImpliedEndTags() + for i := len(p.oe) - 1; i >= 0; i-- { + if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template { + p.oe = p.oe[:i] + break + } + } + p.clearActiveFormattingElements() + p.templateStack.pop() + p.resetInsertionMode() + return true + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.6.4.6. +func afterHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. + p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Body: + p.addElement() + p.framesetOK = false + p.im = inBodyIM + return true + case a.Frameset: + p.addElement() + p.im = inFramesetIM + return true + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: + p.oe = append(p.oe, p.head) + defer p.oe.remove(p.head) + return inHeadIM(p) + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Body, a.Html, a.Br: + // Drop down to creating an implied tag. + case a.Template: + return inHeadIM(p) + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) + p.framesetOK = true + return false +} + +// copyAttributes copies attributes of src not found on dst to dst. 
+func copyAttributes(dst *Node, src Token) { + if len(src.Attr) == 0 { + return + } + attr := map[string]string{} + for _, t := range dst.Attr { + attr[t.Key] = t.Val + } + for _, t := range src.Attr { + if _, ok := attr[t.Key]; !ok { + dst.Attr = append(dst.Attr, t) + attr[t.Key] = t.Val + } + } +} + +// Section 12.2.6.4.7. +func inBodyIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + d := p.tok.Data + switch n := p.oe.top(); n.DataAtom { + case a.Pre, a.Listing: + if n.FirstChild == nil { + // Ignore a newline at the start of a
<pre> block.
+				if d != "" && d[0] == '\r' {
+					d = d[1:]
+				}
+				if d != "" && d[0] == '\n' {
+					d = d[1:]
+				}
+			}
+		}
+		d = strings.Replace(d, "\x00", "", -1)
+		if d == "" {
+			return true
+		}
+		p.reconstructActiveFormattingElements()
+		p.addText(d)
+		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
+			// There were non-whitespace characters inserted.
+			p.framesetOK = false
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			if p.oe.contains(a.Template) {
+				return true
+			}
+			copyAttributes(p.oe[0], p.tok)
+		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
+			return inHeadIM(p)
+		case a.Body:
+			if p.oe.contains(a.Template) {
+				return true
+			}
+			if len(p.oe) >= 2 {
+				body := p.oe[1]
+				if body.Type == ElementNode && body.DataAtom == a.Body {
+					p.framesetOK = false
+					copyAttributes(body, p.tok)
+				}
+			}
+		case a.Frameset:
+			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
+				// Ignore the token.
+				return true
+			}
+			body := p.oe[1]
+			if body.Parent != nil {
+				body.Parent.RemoveChild(body)
+			}
+			p.oe = p.oe[:1]
+			p.addElement()
+			p.im = inFramesetIM
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(buttonScope, a.P)
+			switch n := p.top(); n.DataAtom {
+			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+				p.oe.pop()
+			}
+			p.addElement()
+		case a.Pre, a.Listing:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			// The newline, if any, will be dealt with by the TextToken case.
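+			// Illustrative example (added comment, not upstream text): for the
+			// input "<pre>\nfoo</pre>", the leading newline is stripped by the
+			// a.Pre/a.Listing branch of the TextToken case above, so the <pre>
+			// element ends up containing just the text "foo".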
+			p.framesetOK = false
+		case a.Form:
+			if p.form != nil && !p.oe.contains(a.Template) {
+				// Ignore the token
+				return true
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			if !p.oe.contains(a.Template) {
+				p.form = p.top()
+			}
+		case a.Li:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Li:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Dd, a.Dt:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Dd, a.Dt:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Plaintext:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Button:
+			p.popUntil(defaultScope, a.Button)
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+		case a.A:
+			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
+				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
+					p.inBodyEndTagFormatting(a.A)
+					p.oe.remove(n)
+					p.afe.remove(n)
+					break
+				}
+			}
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.Nobr:
+			p.reconstructActiveFormattingElements()
+			if p.elementInScope(defaultScope, a.Nobr) {
+				p.inBodyEndTagFormatting(a.Nobr)
+				p.reconstructActiveFormattingElements()
+			}
+			p.addFormattingElement()
+		case a.Applet, a.Marquee, a.Object:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.afe = append(p.afe, &scopeMarker)
+			p.framesetOK = false
+		case a.Table:
+			if !p.quirks {
+				p.popUntil(buttonScope, a.P)
+			}
+			p.addElement()
+			p.framesetOK = false
+			p.im = inTableIM
+			return true
+		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			if p.tok.DataAtom == a.Input {
+				for _, t := range p.tok.Attr {
+					if t.Key == "type" {
+						if strings.ToLower(t.Val) == "hidden" {
+							// Skip setting framesetOK = false
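+							// (Added comment, not upstream text: leaving framesetOK
+							// untouched means a hidden <input> does not block a later
+							// <frameset> start tag from replacing the body.)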
+							return true
+						}
+					}
+				}
+			}
+			p.framesetOK = false
+		case a.Param, a.Source, a.Track:
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+		case a.Hr:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			p.framesetOK = false
+		case a.Image:
+			p.tok.DataAtom = a.Img
+			p.tok.Data = a.Img.String()
+			return false
+		case a.Isindex:
+			if p.form != nil {
+				// Ignore the token.
+				return true
+			}
+			action := ""
+			prompt := "This is a searchable index. Enter search keywords: "
+			attr := []Attribute{{Key: "name", Val: "isindex"}}
+			for _, t := range p.tok.Attr {
+				switch t.Key {
+				case "action":
+					action = t.Val
+				case "name":
+					// Ignore the attribute.
+				case "prompt":
+					prompt = t.Val
+				default:
+					attr = append(attr, t)
+				}
+			}
+			p.acknowledgeSelfClosingTag()
+			p.popUntil(buttonScope, a.P)
+			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
+			if p.form == nil {
+				// NOTE: The 'isindex' element has been removed,
+				// and the 'template' element has not been designed to be
+				// collaborative with the index element.
+				//
+				// Ignore the token.
+				return true
+			}
+			if action != "" {
+				p.form.Attr = []Attribute{{Key: "action", Val: action}}
+			}
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
+			p.addText(prompt)
+			p.addChild(&Node{
+				Type:     ElementNode,
+				DataAtom: a.Input,
+				Data:     a.Input.String(),
+				Attr:     attr,
+			})
+			p.oe.pop()
+			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
+		case a.Textarea:
+			p.addElement()
+			p.setOriginalIM()
+			p.framesetOK = false
+			p.im = textIM
+		case a.Xmp:
+			p.popUntil(buttonScope, a.P)
+			p.reconstructActiveFormattingElements()
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Iframe:
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Noembed, a.Noscript:
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Select:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+			p.im = inSelectIM
+			return true
+		case a.Optgroup, a.Option:
+			if p.top().DataAtom == a.Option {
+				p.oe.pop()
+			}
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		case a.Rb, a.Rtc:
+			if p.elementInScope(defaultScope, a.Ruby) {
+				p.generateImpliedEndTags()
+			}
+			p.addElement()
+		case a.Rp, a.Rt:
+			if p.elementInScope(defaultScope, a.Ruby) {
+				p.generateImpliedEndTags("rtc")
+			}
+			p.addElement()
+		case a.Math, a.Svg:
+			p.reconstructActiveFormattingElements()
+			if p.tok.DataAtom == a.Math {
+				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+			} else {
+				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+			}
+			adjustForeignAttributes(p.tok.Attr)
+			p.addElement()
+			p.top().Namespace = p.tok.Data
+			if p.hasSelfClosingToken {
+				p.oe.pop()
+				p.acknowledgeSelfClosingTag()
+			}
+			return true
+		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+			// Ignore the token.
+		default:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Body:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.im = afterBodyIM
+			}
+		case a.Html:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
+				return false
+			}
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.Form:
+			if p.oe.contains(a.Template) {
+				i := p.indexOfElementInScope(defaultScope, a.Form)
+				if i == -1 {
+					// Ignore the token.
+					return true
+				}
+				p.generateImpliedEndTags()
+				if p.oe[i].DataAtom != a.Form {
+					// Ignore the token.
+					return true
+				}
+				p.popUntil(defaultScope, a.Form)
+			} else {
+				node := p.form
+				p.form = nil
+				i := p.indexOfElementInScope(defaultScope, a.Form)
+				if node == nil || i == -1 || p.oe[i] != node {
+					// Ignore the token.
+					return true
+				}
+				p.generateImpliedEndTags()
+				p.oe.remove(node)
+			}
+		case a.P:
+			if !p.elementInScope(buttonScope, a.P) {
+				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
+			}
+			p.popUntil(buttonScope, a.P)
+		case a.Li:
+			p.popUntil(listItemScope, a.Li)
+		case a.Dd, a.Dt:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
+		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.inBodyEndTagFormatting(p.tok.DataAtom)
+		case a.Applet, a.Marquee, a.Object:
+			if p.popUntil(defaultScope, p.tok.DataAtom) {
+				p.clearActiveFormattingElements()
+			}
+		case a.Br:
+			p.tok.Type = StartTagToken
+			return false
+		case a.Template:
+			return inHeadIM(p)
+		default:
+			p.inBodyEndTagOther(p.tok.DataAtom)
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	case ErrorToken:
+		// TODO: remove this divergence from the HTML5 spec.
+		if len(p.templateStack) > 0 {
+			p.im = inTemplateIM
+			return false
+		} else {
+			for _, e := range p.oe {
+				switch e.DataAtom {
+				case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th,
+					a.Thead, a.Tr, a.Body, a.Html:
+				default:
+					return true
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
+	// This is the "adoption agency" algorithm, described at
+	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
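+	//
+	// Illustrative example (added comment, not upstream text): parsing the
+	// misnested input "<p>1<b>2<i>3</b>4</i>5</p>" yields a tree that
+	// serializes as "<p>1<b>2<i>3</i></b><i>4</i>5</p>"; the </b> and </i>
+	// end tags are the ones routed through this algorithm.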
+
+	// TODO: this is a fairly literal line-by-line translation of that algorithm.
+	// Once the code successfully parses the comprehensive test suite, we should
+	// refactor this code to be more idiomatic.
+
+	// Steps 1-4. The outer loop.
+	for i := 0; i < 8; i++ {
+		// Step 5. Find the formatting element.
+		var formattingElement *Node
+		for j := len(p.afe) - 1; j >= 0; j-- {
+			if p.afe[j].Type == scopeMarkerNode {
+				break
+			}
+			if p.afe[j].DataAtom == tagAtom {
+				formattingElement = p.afe[j]
+				break
+			}
+		}
+		if formattingElement == nil {
+			p.inBodyEndTagOther(tagAtom)
+			return
+		}
+		feIndex := p.oe.index(formattingElement)
+		if feIndex == -1 {
+			p.afe.remove(formattingElement)
+			return
+		}
+		if !p.elementInScope(defaultScope, tagAtom) {
+			// Ignore the tag.
+			return
+		}
+
+		// Steps 9-10. Find the furthest block.
+		var furthestBlock *Node
+		for _, e := range p.oe[feIndex:] {
+			if isSpecialElement(e) {
+				furthestBlock = e
+				break
+			}
+		}
+		if furthestBlock == nil {
+			e := p.oe.pop()
+			for e != formattingElement {
+				e = p.oe.pop()
+			}
+			p.afe.remove(e)
+			return
+		}
+
+		// Steps 11-12. Find the common ancestor and bookmark node.
+		commonAncestor := p.oe[feIndex-1]
+		bookmark := p.afe.index(formattingElement)
+
+		// Step 13. The inner loop. Find the lastNode to reparent.
+		lastNode := furthestBlock
+		node := furthestBlock
+		x := p.oe.index(node)
+		// Steps 13.1-13.2
+		for j := 0; j < 3; j++ {
+			// Step 13.3.
+			x--
+			node = p.oe[x]
+			// Step 13.4 - 13.5.
+			if p.afe.index(node) == -1 {
+				p.oe.remove(node)
+				continue
+			}
+			// Step 13.6.
+			if node == formattingElement {
+				break
+			}
+			// Step 13.7.
+			clone := node.clone()
+			p.afe[p.afe.index(node)] = clone
+			p.oe[p.oe.index(node)] = clone
+			node = clone
+			// Step 13.8.
+			if lastNode == furthestBlock {
+				bookmark = p.afe.index(node) + 1
+			}
+			// Step 13.9.
+			if lastNode.Parent != nil {
+				lastNode.Parent.RemoveChild(lastNode)
+			}
+			node.AppendChild(lastNode)
+			// Step 13.10.
+			lastNode = node
+		}
+
+		// Step 14. Reparent lastNode to the common ancestor,
+		// or for misnested table nodes, to the foster parent.
+		if lastNode.Parent != nil {
+			lastNode.Parent.RemoveChild(lastNode)
+		}
+		switch commonAncestor.DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			p.fosterParent(lastNode)
+		default:
+			commonAncestor.AppendChild(lastNode)
+		}
+
+		// Steps 15-17. Reparent nodes from the furthest block's children
+		// to a clone of the formatting element.
+		clone := formattingElement.clone()
+		reparentChildren(clone, furthestBlock)
+		furthestBlock.AppendChild(clone)
+
+		// Step 18. Fix up the list of active formatting elements.
+		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
+			// Move the bookmark with the rest of the list.
+			bookmark--
+		}
+		p.afe.remove(formattingElement)
+		p.afe.insert(bookmark, clone)
+
+		// Step 19. Fix up the stack of open elements.
+		p.oe.remove(formattingElement)
+		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
+	}
+}
+
+// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
+// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
+// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
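+//
+// Illustrative example (added comment, not upstream text): with an
+// open-element stack of [html, body, span, div], an end tag </span> walks
+// down from the top, hits the special element <div> before finding <span>,
+// and breaks out, so nothing is popped and the token is effectively ignored.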
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		if p.oe[i].DataAtom == tagAtom {
+			p.oe = p.oe[:i]
+			break
+		}
+		if isSpecialElement(p.oe[i]) {
+			break
+		}
+	}
+}
+
+// Section 12.2.6.4.8.
+func textIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		p.oe.pop()
+	case TextToken:
+		d := p.tok.Data
+		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
+			// Ignore a newline at the start of a <textarea>