diff --git a/.travis.yml b/.travis.yml index bb05a35ff..8c14194b1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,9 +17,11 @@ before_install: before_script: - dep ensure -v - docker --version + - git clone --single-branch -b v0.11.4 -q https://github.com/mholt/caddy $GOPATH/src/github.com/mholt/caddy script: - - make all && make test + - make test + - bash hack/docker_build.sh deploy: skip_cleanup: true diff --git a/Gopkg.lock b/Gopkg.lock index 3517263db..d96efbb71 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,57 +2,82 @@ [[projects]] + digest = "1:4d6f036ea3fe636bcb2e89850bcdc62a771354e157cd51b8b22a2de8562bf663" name = "cloud.google.com/go" packages = ["compute/metadata"] + pruneopts = "UT" revision = "c9474f2f8deb81759839474b6bd1726bbfe1c1c4" version = "v0.36.0" [[projects]] + digest = "1:f9ae348e1f793dcf9ed930ed47136a67343dbd6809c5c91391322267f4476892" name = "github.com/Microsoft/go-winio" packages = ["."] - revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f" - version = "v0.4.7" + pruneopts = "UT" + revision = "1a8911d1ed007260465c3bfbbc785ac6915a0bb8" + version = "v0.4.12" [[projects]] + digest = "1:a2682518d905d662d984ef9959984ef87cecb777d379bfa9d9fe40e78069b3e4" name = "github.com/PuerkitoBio/purell" packages = ["."] - revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" - version = "v1.1.0" + pruneopts = "UT" + revision = "44968752391892e1b0d0b821ee79e9a85fa13049" + version = "v1.1.1" [[projects]] branch = "master" + digest = "1:c739832d67eb1e9cc478a19cc1a1ccd78df0397bf8a32978b759152e205f644b" name = "github.com/PuerkitoBio/urlesc" packages = ["."] + pruneopts = "UT" revision = "de5bf2ad457846296e2031421a34e2568e304e35" [[projects]] + digest = "1:87c2e02fb01c27060ccc5ba7c5a407cc91147726f8f40b70cceeedbc52b1f3a8" name = "github.com/Sirupsen/logrus" packages = ["."] - revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" - version = "v1.0.5" + pruneopts = "UT" + revision = "e1e72e9de974bd926e5c56f83753fba2df402ce5" + version = "v1.3.0" [[projects]] 
branch = "master" + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" name = "github.com/beorn7/perks" packages = ["quantile"] + pruneopts = "UT" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" name = "github.com/davecgh/go-spew" packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" [[projects]] branch = "master" + digest = "1:4c7d169280debf9f36b84a0f682094889cccc5dc0db8657f9cffc93b21975a57" name = "github.com/docker/distribution" packages = [ "digestset", - "reference" + "reference", ] - revision = "749f6afb4572201e3c37325d0ffedb6f32be8950" + pruneopts = "UT" + revision = "6d62eb1d4a3515399431b713fde3ce5a9b40e8d5" [[projects]] + digest = "1:ec821dda59d7dd340498d74f798aa218b2c782bba54a690e866dc4f520d900d5" name = "github.com/docker/docker" packages = [ "api", @@ -74,338 +99,466 @@ "pkg/ioutils", "pkg/longpath", "pkg/system", - "pkg/tlsconfig" + "pkg/tlsconfig", ] + pruneopts = "UT" revision = "90d35abf7b3535c1c319c872900fbd76374e521c" version = "v17.05.0-ce-rc3" [[projects]] branch = "master" + digest = "1:02f8f4889e53a6adbc7bf3f2b9007ce8aecb0b79686fc9f99265246c88063f10" name = "github.com/docker/go-connections" packages = [ "nat", "sockets", - "tlsconfig" + "tlsconfig", ] - revision = "7395e3f8aa162843a74ed6d48e79627d9792ac55" + pruneopts = "UT" + revision = "97c2040d34dfae1d1b1275fa3a78dbdd2f41cf7e" [[projects]] + digest = "1:6f82cacd0af5921e99bf3f46748705239b36489464f4529a1589bc895764fb18" name = "github.com/docker/go-units" packages = ["."] + pruneopts = 
"UT" revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" version = "v0.3.3" [[projects]] branch = "master" + digest = "1:4841e14252a2cecf11840bd05230412ad469709bbacfc12467e2ce5ad07f339b" name = "github.com/docker/libtrust" packages = ["."] + pruneopts = "UT" revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20" [[projects]] + digest = "1:8ee7b41ace3ba875c17e38ba7780e7cf0d29882338637861e9f13f04f60ecc5c" name = "github.com/emicklei/go-restful" packages = [ ".", - "log" + "log", ] - revision = "3658237ded108b4134956c1b3050349d93e7b895" - version = "v2.7.1" + pruneopts = "UT" + revision = "85d198d05a92d31823b852b4a5928114912e8949" + version = "v2.9.0" [[projects]] + digest = "1:8ba45daa43acfd9364068b118a6884cb82bc1ef0569dc05260f3b464907e07d9" name = "github.com/emicklei/go-restful-openapi" packages = ["."] - revision = "51bf251d405ad1e23511fef0a2dbe40bc70ce2c6" - version = "v0.11.0" + pruneopts = "UT" + revision = "b7062368c258c9e8f8cbe9dd2e6aebfa1b747be6" + version = "v1.0.0" [[projects]] + digest = "1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda" name = "github.com/ghodss/yaml" packages = ["."] + pruneopts = "UT" revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" version = "v1.0.0" [[projects]] - branch = "master" - name = "github.com/go-logr/logr" + digest = "1:2a792dde3ae5dc95000d4dee312523504ea875b097a2b119a2f55d1d1d32ed61" + name = "github.com/go-ldap/ldap" packages = ["."] - revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" + pruneopts = "UT" + revision = "729c20c2694d870bcd631f0dadaecd088bd7ccbc" + version = "v3.0.2" [[projects]] - name = "github.com/go-logr/zapr" + digest = "1:edd2fa4578eb086265db78a9201d15e76b298dfd0d5c379da83e9c61712cf6df" + name = "github.com/go-logr/logr" packages = ["."] - revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab" + pruneopts = "UT" + revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" version = "v0.1.0" [[projects]] - branch = "master" + digest = 
"1:d81dfed1aa731d8e4a45d87154ec15ef18da2aa80fa9a2f95bec38577a244a99" + name = "github.com/go-logr/zapr" + packages = ["."] + pruneopts = "UT" + revision = "03f06a783fbb7dfaf3f629c7825480e43a7105e6" + version = "v0.1.1" + +[[projects]] + digest = "1:953a2628e4c5c72856b53f5470ed5e071c55eccf943d798d42908102af2a610f" name = "github.com/go-openapi/jsonpointer" packages = ["."] - revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2" + pruneopts = "UT" + revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004" + version = "v0.18.0" [[projects]] - branch = "master" + digest = "1:81210e0af657a0fb3638932ec68e645236bceefa4c839823db0c4d918f080895" name = "github.com/go-openapi/jsonreference" packages = ["."] - revision = "3fb327e6747da3043567ee86abd02bb6376b6be2" + pruneopts = "UT" + revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3" + version = "v0.18.0" [[projects]] - branch = "master" + digest = "1:08656ef9c5a45ddccb7f206ca2d67e12e9fcda4122a83dc0544b5c967267cefa" name = "github.com/go-openapi/spec" packages = ["."] - revision = "bce47c9386f9ecd6b86f450478a80103c3fe1402" + pruneopts = "UT" + revision = "5b6cdde3200976e3ecceb2868706ee39b6aff3e4" + version = "v0.18.0" [[projects]] - branch = "master" + digest = "1:0005186c6608dd542239ac8e4f4f1e2e7c24d493e999113c46b93332f0362fc0" name = "github.com/go-openapi/swag" packages = ["."] - revision = "2b0bd4f193d011c203529df626a65d63cb8a79e8" + pruneopts = "UT" + revision = "1d29f06aebd59ccdf11ae04aa0334ded96e2d909" + version = "v0.18.0" [[projects]] + digest = "1:33082c63746b464db3d1c2c07a1396d860484d97fe857ef9e8668a9b406db09f" + name = "github.com/go-redis/redis" + packages = [ + ".", + "internal", + "internal/consistenthash", + "internal/hashtag", + "internal/pool", + "internal/proto", + "internal/util", + ] + pruneopts = "UT" + revision = "d22fde8721cc915a55aeb6b00944a76a92bfeb6e" + version = "v6.15.2" + +[[projects]] + digest = "1:ec6f9bf5e274c833c911923c9193867f3f18788c461f76f05f62bb1510e0ae65" name = 
"github.com/go-sql-driver/mysql" packages = ["."] - revision = "d523deb1b23d913de5bdada721a6071e71283618" - version = "v1.4.0" + pruneopts = "UT" + revision = "72cd26f257d44c1114970e19afddcd812016007e" + version = "v1.4.1" [[projects]] + digest = "1:4d02824a56d268f74a6b6fdd944b20b58a77c3d70e81008b3ee0c4f1a6777340" name = "github.com/gogo/protobuf" packages = [ "proto", - "sortkeys" + "sortkeys", ] - revision = "1adfc126b41513cc696b209667c8656ea7aac67c" - version = "v1.0.0" + pruneopts = "UT" + revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" + version = "v1.2.1" [[projects]] branch = "master" + digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467" name = "github.com/golang/glog" packages = ["."] + pruneopts = "UT" revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" [[projects]] branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" name = "github.com/golang/groupcache" packages = ["lru"] + pruneopts = "UT" revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b" [[projects]] + digest = "1:239c4c7fd2159585454003d9be7207167970194216193a8a210b8d29576f19c9" name = "github.com/golang/protobuf" packages = [ "proto", "ptypes", "ptypes/any", "ptypes/duration", - "ptypes/timestamp" + "ptypes/timestamp", ] - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" + pruneopts = "UT" + revision = "c823c79ea1570fb5ff454033735a8e68575d1d0f" + version = "v1.3.0" [[projects]] branch = "master" + digest = "1:0bfbe13936953a98ae3cfe8ed6670d396ad81edf069a806d2f6515d7bb6950df" name = "github.com/google/btree" packages = ["."] + pruneopts = "UT" revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" [[projects]] branch = "master" + digest = "1:3ee90c0d94da31b442dde97c99635aaafec68d0b8a3c12ee2075c6bdabeec6bb" name = "github.com/google/gofuzz" packages = ["."] + pruneopts = "UT" revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" [[projects]] + digest = 
"1:582b704bebaa06b48c29b0cec224a6058a09c86883aaddabde889cd1a5f73e1b" name = "github.com/google/uuid" packages = ["."] - revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8" - version = "v1.1.0" + pruneopts = "UT" + revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" + version = "v1.1.1" [[projects]] + digest = "1:65c4414eeb350c47b8de71110150d0ea8a281835b1f386eacaa3ad7325929c21" name = "github.com/googleapis/gnostic" packages = [ "OpenAPIv2", "compiler", - "extensions" + "extensions", ] + pruneopts = "UT" revision = "7c663266750e7d82587642f65e60bc4083f1f84e" version = "v0.2.0" [[projects]] branch = "master" + digest = "1:b4395b2a4566c24459af3d04009b39cc21762fc77ec7bf7a1aa905c91e8f018d" name = "github.com/gregjones/httpcache" packages = [ ".", - "diskcache" + "diskcache", ] + pruneopts = "UT" revision = "3befbb6ad0cc97d4c25d851e9528915809e1a22f" [[projects]] - branch = "master" + digest = "1:d15ee511aa0f56baacc1eb4c6b922fa1c03b38413b6be18166b996d82a0156ea" name = "github.com/hashicorp/golang-lru" packages = [ ".", - "simplelru" + "simplelru", ] - revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" + pruneopts = "UT" + revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c" + version = "v0.5.1" [[projects]] + digest = "1:a0cefd27d12712af4b5018dc7046f245e1e3b5760e2e848c30b171b570708f9b" name = "github.com/imdario/mergo" packages = ["."] - revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58" - version = "v0.3.5" + pruneopts = "UT" + revision = "7c29201646fa3de8506f701213473dd407f19646" + version = "v0.3.7" [[projects]] + digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" name = "github.com/inconshreveable/mousetrap" packages = ["."] + pruneopts = "UT" revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" version = "v1.0" [[projects]] + digest = "1:d4e6e8584d0a94ce567d237e19192dae44d57d2767ac7e1d7fbf5626d176381a" name = "github.com/jinzhu/gorm" packages = ["."] - revision = "6ed508ec6a4ecb3531899a69cbc746ccf65a4166" - version = "v1.9.1" + 
pruneopts = "UT" + revision = "472c70caa40267cb89fd8facb07fe6454b578626" + version = "v1.9.2" [[projects]] branch = "master" + digest = "1:fd97437fbb6b7dce04132cf06775bd258cce305c44add58eb55ca86c6c325160" name = "github.com/jinzhu/inflection" packages = ["."] + pruneopts = "UT" revision = "04140366298a54a039076d798123ffa108fff46c" [[projects]] branch = "master" + digest = "1:f5a2051c55d05548d2d4fd23d244027b59fbd943217df8aa3b5e170ac2fd6e1b" name = "github.com/json-iterator/go" packages = ["."] - revision = "5bc93205020f6311d7e4a34f82c5616a18ec35e5" + pruneopts = "UT" + revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29" + +[[projects]] + digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de" + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + pruneopts = "UT" + revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" + version = "v1.0.2" [[projects]] branch = "master" + digest = "1:84a5a2b67486d5d67060ac393aa255d05d24ed5ee41daecd5635ec22657b6492" name = "github.com/mailru/easyjson" packages = [ "buffer", "jlexer", - "jwriter" + "jwriter", ] - revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485" + pruneopts = "UT" + revision = "6243d8e04c3f819e79757e8bc3faa15c3cb27003" [[projects]] branch = "master" + digest = "1:fc2b04b0069d6b10bdef96d278fe20c345794009685ed3c8c7f1a6dc023eefec" name = "github.com/mattbaird/jsonpatch" packages = ["."] + pruneopts = "UT" revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f" [[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] + pruneopts = "UT" revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" version = "v1.0.1" [[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" name = "github.com/mitchellh/go-homedir" packages = ["."] + pruneopts = "UT" revision = "af06845cf3004701891bf4fdb884bfe4920b3727" version = "v1.1.0" 
[[projects]] + digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" name = "github.com/modern-go/concurrent" packages = ["."] + pruneopts = "UT" revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" version = "1.0.3" [[projects]] + digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" name = "github.com/modern-go/reflect2" packages = ["."] + pruneopts = "UT" revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" version = "1.0.1" [[projects]] + digest = "1:ee4d4af67d93cc7644157882329023ce9a7bcfce956a079069a9405521c7cc8d" name = "github.com/opencontainers/go-digest" packages = ["."] + pruneopts = "UT" revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf" version = "v1.0.0-rc1" [[projects]] + digest = "1:e5d0bd87abc2781d14e274807a470acd180f0499f8bf5bb18606e9ec22ad9de9" name = "github.com/pborman/uuid" packages = ["."] + pruneopts = "UT" revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" version = "v1.2" [[projects]] branch = "master" + digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" name = "github.com/petar/GoLLRB" packages = ["llrb"] + pruneopts = "UT" revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" [[projects]] + digest = "1:0e7775ebbcf00d8dd28ac663614af924411c868dca3d5aa762af0fae3808d852" name = "github.com/peterbourgon/diskv" packages = ["."] + pruneopts = "UT" revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" version = "v2.0.1" [[projects]] + digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" name = "github.com/pkg/errors" packages = ["."] - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" + pruneopts = "UT" + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" [[projects]] + digest = "1:93a746f1060a8acbcf69344862b2ceced80f854170e1caae089b2834c5fbf7f4" name = "github.com/prometheus/client_golang" packages = [ "prometheus", "prometheus/internal", - "prometheus/promhttp" + "prometheus/promhttp", ] + 
pruneopts = "UT" revision = "505eaef017263e299324067d40ca2c48f6a2cf50" version = "v0.9.2" [[projects]] branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" name = "github.com/prometheus/client_model" packages = ["go"] + pruneopts = "UT" revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" [[projects]] + digest = "1:35cf6bdf68db765988baa9c4f10cc5d7dda1126a54bd62e252dbcd0b1fc8da90" name = "github.com/prometheus/common" packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", - "model" + "model", ] + pruneopts = "UT" revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250" version = "v0.2.0" [[projects]] branch = "master" + digest = "1:01cd0cd47758f04c5604daa3be4637e2afa1e0c15af7e08289e95360369e4f48" name = "github.com/prometheus/procfs" packages = [ ".", "internal/util", "iostats", "nfs", - "xfs" + "xfs", ] - revision = "bbced9601137e764853b2fad7ec3e2dc4c504e02" + pruneopts = "UT" + revision = "d0f344d83b0c80a1bc03b547a2374a9ec6711144" [[projects]] + digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939" name = "github.com/spf13/cobra" packages = ["."] + pruneopts = "UT" revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" version = "v0.0.3" [[projects]] + digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" name = "github.com/spf13/pflag" packages = ["."] - revision = "583c0c0531f06d5278b7d917446061adc344b5cd" - version = "v1.0.1" + pruneopts = "UT" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" [[projects]] + digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" name = "go.uber.org/atomic" packages = ["."] + pruneopts = "UT" revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" version = "v1.3.2" [[projects]] + digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" name = "go.uber.org/multierr" packages = ["."] + pruneopts = "UT" revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" version = 
"v1.1.0" [[projects]] + digest = "1:c52caf7bd44f92e54627a31b85baf06a68333a196b3d8d241480a774733dcf8b" name = "go.uber.org/zap" packages = [ ".", @@ -413,19 +566,23 @@ "internal/bufferpool", "internal/color", "internal/exit", - "zapcore" + "zapcore", ] + pruneopts = "UT" revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" version = "v1.9.1" [[projects]] branch = "master" + digest = "1:058e9504b9a79bfe86092974d05bb3298d2aa0c312d266d43148de289a5065d9" name = "golang.org/x/crypto" packages = ["ssh/terminal"] - revision = "7f39a6fea4fe9364fb61e1def6a268a51b4f3a06" + pruneopts = "UT" + revision = "c2843e01d9a2bc60bb26ad24e09734fdc2d9ec58" [[projects]] branch = "master" + digest = "1:348e38852dac5030f8ce455e1b8d7a5727bbe0af43c0664efd89ec32414093c0" name = "golang.org/x/net" packages = [ "context", @@ -435,32 +592,38 @@ "http2/hpack", "idna", "internal/socks", - "proxy" + "proxy", ] - revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196" + pruneopts = "UT" + revision = "56fb01167e7d1e1d17dd87993d34c963f4356e87" [[projects]] branch = "master" + digest = "1:5e9f22cf754ab20a5dff0ae04b12516b112c5b81cd44dccccde148865084d730" name = "golang.org/x/oauth2" packages = [ ".", "google", "internal", "jws", - "jwt" + "jwt", ] - revision = "9b3c75971fc92dd27c6436a37c05c831498658f1" + pruneopts = "UT" + revision = "e64efc72b421e893cbf63f17ba2221e7d6d0b0f3" [[projects]] branch = "master" + digest = "1:85cd5224b89829559e8327ba2e52e0df72638d33cf3082b066ea9dea34391e2f" name = "golang.org/x/sys" packages = [ "unix", - "windows" + "windows", ] - revision = "fc8bd948cf46f9c7af0f07d34151ce25fe90e477" + pruneopts = "UT" + revision = "10058d7d4faa7dd5ef860cbd31af00903076e7b8" [[projects]] + digest = "1:0c56024909189aee3364b7f21a95a27459f718aa7c199a5c111c36cfffd9eaef" name = "golang.org/x/text" packages = [ "collate", @@ -477,24 +640,30 @@ "unicode/cldr", "unicode/norm", "unicode/rangetable", - "width" + "width", ] + pruneopts = "UT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = 
"v0.3.0" [[projects]] branch = "master" + digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" name = "golang.org/x/time" packages = ["rate"] - revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" + pruneopts = "UT" + revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" [[projects]] branch = "master" + digest = "1:6bb2e1cbcf253307a88a531a75aa4d38fecb8d56a0dc9e837e288fe3189954f6" name = "golang.org/x/tools" packages = ["container/intsets"] - revision = "44bee7e801e4a70b5fc9a91ff23830ab4df55d5e" + pruneopts = "UT" + revision = "00c44ba9c14f88ffdd4fb5bfae57fe8dd6d6afb1" [[projects]] + digest = "1:7bc25c2efff76b31f146caf630c617be9b666c6164f0632050466fbec0500125" name = "google.golang.org/appengine" packages = [ ".", @@ -507,24 +676,38 @@ "internal/modules", "internal/remote_api", "internal/urlfetch", - "urlfetch" + "urlfetch", ] - revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" - version = "v1.1.0" + pruneopts = "UT" + revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1" + version = "v1.4.0" [[projects]] + digest = "1:af07c44dc04418be522bfd4e21ca9130d58169ea084e3a883e23772003a381c4" + name = "gopkg.in/asn1-ber.v1" + packages = ["."] + pruneopts = "UT" + revision = "f715ec2f112d1e4195b827ad68cf44017a3ef2b1" + version = "v1.3" + +[[projects]] + digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" name = "gopkg.in/inf.v0" packages = ["."] + pruneopts = "UT" revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" version = "v0.9.1" [[projects]] + digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" name = "gopkg.in/yaml.v2" packages = ["."] - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" + pruneopts = "UT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" [[projects]] + digest = "1:26a67eb988225c6a0600c1af0b35e795ac4d23a9c40a7aa178fa2adc0670f1f7" name = "k8s.io/api" packages = [ "admission/v1beta1", @@ -558,12 +741,14 @@ 
"settings/v1alpha1", "storage/v1", "storage/v1alpha1", - "storage/v1beta1" + "storage/v1beta1", ] + pruneopts = "UT" revision = "b503174bad5991eb66f18247f52e41c3258f6348" version = "kubernetes-1.12.3" [[projects]] + digest = "1:3c38a27df3152aa083018cb7a8d7b5bd5af5e808733ebbc6ae5b5fe10f8b0f84" name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", @@ -609,12 +794,27 @@ "pkg/version", "pkg/watch", "third_party/forked/golang/json", - "third_party/forked/golang/reflect" + "third_party/forked/golang/reflect", ] + pruneopts = "UT" revision = "eddba98df674a16931d2d4ba75edc3a389bf633a" version = "kubernetes-1.12.3" [[projects]] + branch = "release-1.10" + digest = "1:89837c4f0a1aa35aa5993bd0ee002ed0073e505c08bcb716c9941059e1df3da6" + name = "k8s.io/apiserver" + packages = [ + "pkg/apis/audit", + "pkg/authentication/user", + "pkg/authorization/authorizer", + "pkg/endpoints/request", + ] + pruneopts = "UT" + revision = "30d6a91f580b7a2a240dccebc1a35bff2a6940f6" + +[[projects]] + digest = "1:7b85a80c96c7294b896204fca216c35d10e63fd0a021b23c5b5fe00f68932420" name = "k8s.io/client-go" packages = [ "discovery", @@ -754,36 +954,30 @@ "util/integer", "util/jsonpath", "util/retry", - "util/workqueue" + "util/workqueue", ] + pruneopts = "UT" revision = "d082d5923d3cc0bfbb066ee5fbdea3d0ca79acf8" version = "kubernetes-1.12.3" [[projects]] branch = "master" - name = "k8s.io/component-base" - packages = ["logs"] - revision = "1925c57e3358154fd54383773de0c0c7710a9196" - -[[projects]] - name = "k8s.io/klog" - packages = ["."] - revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0" - version = "v0.2.0" - -[[projects]] - branch = "master" + digest = "1:20ca56a1299fe3787b1cf86c3a0388fbc11d08604783b32a258eca7a947a7fdb" name = "k8s.io/kube-openapi" packages = ["pkg/util/proto"] - revision = "d50a959ae76a85c7c262a9767ef29f37093c2b8a" + pruneopts = "UT" + revision = "15615b16d372105f0c69ff47dfe7402926a65aaa" [[projects]] + digest = 
"1:ceff1906e568c23d7a337fdda89900d6155aaf93db7cb6da73488ffb649641e1" name = "k8s.io/kubernetes" packages = ["pkg/util/slice"] - revision = "721bfa751924da8d1680787490c54b9179b1fed0" - version = "v1.13.3" + pruneopts = "UT" + revision = "c27b913fddd1a6c480c229191a087698aa92f0b1" + version = "v1.13.4" [[projects]] + digest = "1:3c56f356b5cb581860246ca0c08c7a35aa21b2c071b20c428a53b2c3c13c87fd" name = "sigs.k8s.io/controller-runtime" packages = [ "pkg/cache", @@ -803,14 +997,68 @@ "pkg/webhook/admission", "pkg/webhook/admission/types", "pkg/webhook/internal/metrics", - "pkg/webhook/types" + "pkg/webhook/types", ] + pruneopts = "UT" revision = "f6f0bc9611363b43664d08fb097ab13243ef621d" version = "v0.1.9" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "af7a1befccf91a9979c7710cf95e61ccb7dc4131fd0cc22e585e2e3b8034f92f" + input-imports = [ + "github.com/dgrijalva/jwt-go", + "github.com/docker/docker/api/types", + "github.com/docker/docker/client", + "github.com/emicklei/go-restful", + "github.com/emicklei/go-restful-openapi", + "github.com/go-ldap/ldap", + "github.com/go-openapi/spec", + "github.com/go-redis/redis", + "github.com/go-sql-driver/mysql", + "github.com/golang/glog", + "github.com/jinzhu/gorm", + "github.com/json-iterator/go", + "github.com/mitchellh/go-homedir", + "github.com/spf13/cobra", + "github.com/spf13/pflag", + "golang.org/x/tools/container/intsets", + "gopkg.in/yaml.v2", + "k8s.io/api/apps/v1", + "k8s.io/api/apps/v1beta2", + "k8s.io/api/batch/v1", + "k8s.io/api/batch/v1beta1", + "k8s.io/api/core/v1", + "k8s.io/api/extensions/v1beta1", + "k8s.io/api/policy/v1beta1", + "k8s.io/api/rbac/v1", + "k8s.io/api/storage/v1", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/resource", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/types", + 
"k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/apiserver/pkg/authorization/authorizer", + "k8s.io/apiserver/pkg/endpoints/request", + "k8s.io/client-go/informers", + "k8s.io/client-go/informers/core/v1", + "k8s.io/client-go/informers/rbac/v1", + "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/plugin/pkg/client/auth/gcp", + "k8s.io/client-go/rest", + "k8s.io/client-go/tools/cache", + "k8s.io/client-go/tools/clientcmd", + "k8s.io/client-go/util/workqueue", + "k8s.io/kubernetes/pkg/util/slice", + "sigs.k8s.io/controller-runtime/pkg/client/config", + "sigs.k8s.io/controller-runtime/pkg/manager", + "sigs.k8s.io/controller-runtime/pkg/runtime/log", + "sigs.k8s.io/controller-runtime/pkg/runtime/signals", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index ec5f2e9bb..ebaab04bf 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -24,6 +24,8 @@ # go-tests = true # unused-packages = true +ignored = ["github.com/mholt/caddy","github.com/mholt/caddy/caddyfile","github.com/mholt/caddy/caddyhttp/httpserver","github.com/mholt/caddy/caddyhttp/staticfiles","github.com/mholt/caddy/caddytls","github.com/mholt/caddy/telemetry"] + [[constraint]] name = "github.com/docker/docker" version = "v17.05.0-ce" diff --git a/build/ks-apigateway/Dockerfile b/build/ks-apigateway/Dockerfile new file mode 100644 index 000000000..f53e0ad33 --- /dev/null +++ b/build/ks-apigateway/Dockerfile @@ -0,0 +1,28 @@ +# Copyright 2018 The KubeSphere Authors. All rights reserved. +# Use of this source code is governed by a Apache license +# that can be found in the LICENSE file. + +# Copyright 2018 The KubeSphere Authors. All rights reserved. +# Use of this source code is governed by a Apache license +# that can be found in the LICENSE file. 
+ +FROM golang:1.10.3 as ks-apigateway-builder + +COPY / /go/src/kubesphere.io/kubesphere +RUN git clone --single-branch -b v0.11.4 -q https://github.com/mholt/caddy /go/src/github.com/mholt/caddy +WORKDIR /go/src/github.com/mholt/caddy +RUN sed -i "/\/\/ This is where other plugins get plugged in (imported)/a\\\t\ + _ \"kubesphere.io/kubesphere/pkg/apigateway/caddy-plugin/authenticate\"\n\t\ + _ \"kubesphere.io/kubesphere/pkg/apigateway/caddy-plugin/authentication\"\n\t\ + _ \"kubesphere.io/kubesphere/pkg/apigateway/caddy-plugin/swagger\""\ + caddy/caddymain/run.go && \ + sed -i "/\/\/ github.com\/BTBurke\/caddy-jwt/a\\\t\"authenticate\",\n\t\"authentication\",\n\t\"swagger\","\ + caddyhttp/httpserver/plugin.go && \ + go install ./caddy && \ + go run /go/src/kubesphere.io/kubesphere/tools/cmd/doc-gen/main.go --output=/go/src/kubesphere.io/kubesphere/install/swagger-ui/api.json + +FROM alpine:3.7 +RUN apk add --update ca-certificates && update-ca-certificates +COPY --from=ks-apigateway-builder /go/bin/* /usr/local/bin/ +COPY --from=ks-apigateway-builder /go/src/kubesphere.io/kubesphere/install/swagger-ui /var/static/swagger-ui +CMD ["sh"] \ No newline at end of file diff --git a/build/ks-apiserver/Dockerfile b/build/ks-apiserver/Dockerfile new file mode 100644 index 000000000..e0e35fc15 --- /dev/null +++ b/build/ks-apiserver/Dockerfile @@ -0,0 +1,18 @@ +# Copyright 2018 The KubeSphere Authors. All rights reserved. +# Use of this source code is governed by a Apache license +# that can be found in the LICENSE file. + +# Copyright 2018 The KubeSphere Authors. All rights reserved. +# Use of this source code is governed by a Apache license +# that can be found in the LICENSE file. 
+FROM golang:1.10.3 as ks-apiserver-builder + +COPY / /go/src/kubesphere.io/kubesphere +WORKDIR /go/src/kubesphere.io/kubesphere + +RUN go build -o ks-apiserver cmd/ks-apiserver/apiserver.go + +FROM alpine:3.7 +RUN apk add --update ca-certificates && update-ca-certificates +COPY --from=ks-apiserver-builder /go/src/kubesphere.io/kubesphere/ks-apiserver /usr/local/bin/ +CMD ["sh"] diff --git a/build/ks-iam/Dockerfile b/build/ks-iam/Dockerfile new file mode 100644 index 000000000..65f17d631 --- /dev/null +++ b/build/ks-iam/Dockerfile @@ -0,0 +1,18 @@ +# Copyright 2018 The KubeSphere Authors. All rights reserved. +# Use of this source code is governed by a Apache license +# that can be found in the LICENSE file. + +# Copyright 2018 The KubeSphere Authors. All rights reserved. +# Use of this source code is governed by a Apache license +# that can be found in the LICENSE file. +FROM golang:1.10.3 as ks-iam-builder + +COPY / /go/src/kubesphere.io/kubesphere +WORKDIR /go/src/kubesphere.io/kubesphere + +RUN go build -o ks-iam cmd/ks-iam/main.go + +FROM alpine:3.7 +RUN apk add --update ca-certificates && update-ca-certificates +COPY --from=ks-iam-builder /go/src/kubesphere.io/kubesphere/ks-iam /usr/local/bin/ +CMD ["sh"] diff --git a/cmd/ks-apiserver/apiserver.go b/cmd/ks-apiserver/apiserver.go index 7db8d5269..38548483c 100644 --- a/cmd/ks-apiserver/apiserver.go +++ b/cmd/ks-apiserver/apiserver.go @@ -1,12 +1,28 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + +*/ package main import ( - "fmt" "kubesphere.io/kubesphere/cmd/ks-apiserver/app" - "os" - + "log" // Install apis _ "kubesphere.io/kubesphere/pkg/apis/metrics/install" + _ "kubesphere.io/kubesphere/pkg/apis/monitoring/install" _ "kubesphere.io/kubesphere/pkg/apis/operations/install" _ "kubesphere.io/kubesphere/pkg/apis/resources/install" ) @@ -16,7 +32,6 @@ func main() { cmd := app.NewAPIServerCommand() if err := cmd.Execute(); err != nil { - fmt.Fprintf(os.Stderr, "error: %v\n", err) - os.Exit(1) + log.Fatalln(err) } } diff --git a/cmd/ks-apiserver/app/options/options.go b/cmd/ks-apiserver/app/options/options.go deleted file mode 100644 index 0816730fa..000000000 --- a/cmd/ks-apiserver/app/options/options.go +++ /dev/null @@ -1,67 +0,0 @@ -package options - -import ( - "fmt" - "github.com/spf13/pflag" - "os" -) - -type ServerRunOptions struct { - - // server bind address - BindAddress string - - // insecure port number - InsecurePort int - - // secure port number - SecurePort int - - // OpenPitrix api gateway service url - OpenPitrixAddress string - - // database connection string in MySQL like - // user:password@tcp(host)/dbname?charset=utf8&parseTime=True - DatabaseConnectionString string - - // tls cert file - TlsCertFile string - - // tls private key file - TlsPrivateKey string - - // host openapi doc - ApiDoc bool - - // kubeconfig file path - KubeConfig string -} - -func NewServerRunOptions() *ServerRunOptions { - // create default server run options - s := ServerRunOptions{ - BindAddress: "0.0.0.0", - InsecurePort: 9090, - SecurePort: 0, - OpenPitrixAddress: "openpitrix-api-gateway.openpitrix-system.svc", - DatabaseConnectionString: "", - TlsCertFile: "", - TlsPrivateKey: "", - ApiDoc: true, - } - - return &s -} - -func (s *ServerRunOptions) AddFlags(fs *pflag.FlagSet) { - - fs.StringVar(&s.BindAddress, "bind-address", "0.0.0.0", "server bind address") 
- fs.IntVar(&s.InsecurePort, "insecure-port", 9090, "insecure port number") - fs.IntVar(&s.SecurePort, "secure-port", 0, "secure port number") - fs.StringVar(&s.OpenPitrixAddress, "openpitrix", "openpitrix-api-gateway.openpitrix-system.svc", "openpitrix api gateway address") - fs.StringVar(&s.DatabaseConnectionString, "database-connection", "", "database connection string") - fs.StringVar(&s.TlsCertFile, "tls-cert-file", "", "tls cert file") - fs.StringVar(&s.TlsPrivateKey, "tls-private-key", "", "tls private key") - fs.BoolVar(&s.ApiDoc, "api-doc", true, "host OpenAPI doc") - fs.StringVar(&s.KubeConfig, "kubeconfig", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME")), "path to kubeconfig file") -} diff --git a/cmd/ks-apiserver/app/server.go b/cmd/ks-apiserver/app/server.go index 3478421b5..95b18c33d 100644 --- a/cmd/ks-apiserver/app/server.go +++ b/cmd/ks-apiserver/app/server.go @@ -1,21 +1,38 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ package app import ( + goflag "flag" "fmt" - "github.com/emicklei/go-restful-openapi" + "github.com/golang/glog" "github.com/spf13/cobra" - "kubesphere.io/kubesphere/cmd/ks-apiserver/app/options" "kubesphere.io/kubesphere/pkg/apiserver/runtime" - "kubesphere.io/kubesphere/pkg/client" "kubesphere.io/kubesphere/pkg/filter" "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/options" "kubesphere.io/kubesphere/pkg/signals" "log" "net/http" ) func NewAPIServerCommand() *cobra.Command { - s := options.NewServerRunOptions() + s := options.SharedOptions cmd := &cobra.Command{ Use: "ks-apiserver", @@ -23,15 +40,13 @@ func NewAPIServerCommand() *cobra.Command { for the api objects. The API Server services REST operations and provides the frontend to the cluster's shared state through which all other components interact.`, RunE: func(cmd *cobra.Command, args []string) error { - - //s.AddFlags(cmd.Flags()) - return Run(s) }, } - s.AddFlags(cmd.Flags()) - + cmd.Flags().AddFlagSet(s.CommandLine) + cmd.Flags().AddGoFlagSet(goflag.CommandLine) + glog.CopyStandardLogTo("INFO") return cmd } @@ -39,26 +54,11 @@ func Run(s *options.ServerRunOptions) error { var err error - stopChan := signals.SetupSignalHandler() - informers.SharedInformerFactory().Start(stopChan) - informers.SharedInformerFactory().WaitForCacheSync(stopChan) - log.Println("resources sync success") + waitForResourceSync() container := runtime.Container container.Filter(filter.Logging) - if len(s.KubeConfig) > 0 { - client.KubeConfigFile = s.KubeConfig - } - - if s.ApiDoc { - config := restfulspec.Config{ - WebServices: container.RegisteredWebServices(), - APIPath: "/apidoc.json", - } - container.Add(restfulspec.NewOpenAPIService(config)) - } - log.Printf("Server listening on %d.", s.InsecurePort) if s.InsecurePort != 0 { @@ -71,3 +71,36 @@ func Run(s *options.ServerRunOptions) error { return err } + +func waitForResourceSync() { + stopChan := signals.SetupSignalHandler() + + informerFactory 
:= informers.SharedInformerFactory() + informerFactory.Rbac().V1().Roles().Lister() + informerFactory.Rbac().V1().RoleBindings().Lister() + informerFactory.Rbac().V1().ClusterRoles().Lister() + informerFactory.Rbac().V1().ClusterRoleBindings().Lister() + + informerFactory.Storage().V1().StorageClasses().Lister() + + informerFactory.Core().V1().Namespaces().Lister() + informerFactory.Core().V1().Nodes().Lister() + informerFactory.Core().V1().ResourceQuotas().Lister() + informerFactory.Core().V1().Pods().Lister() + informerFactory.Core().V1().Services().Lister() + informerFactory.Core().V1().PersistentVolumeClaims().Lister() + informerFactory.Core().V1().Secrets().Lister() + informerFactory.Core().V1().ConfigMaps().Lister() + + informerFactory.Apps().V1().ControllerRevisions().Lister() + informerFactory.Apps().V1().StatefulSets().Lister() + informerFactory.Apps().V1().Deployments().Lister() + informerFactory.Apps().V1().DaemonSets().Lister() + + informerFactory.Batch().V1().Jobs().Lister() + informerFactory.Batch().V1beta1().CronJobs() + + informerFactory.Start(stopChan) + informerFactory.WaitForCacheSync(stopChan) + log.Println("resources sync success") +} diff --git a/pkg/monitoring/v1alpha2/routes.go b/cmd/ks-iam/apiserver.go similarity index 71% rename from pkg/monitoring/v1alpha2/routes.go rename to cmd/ks-iam/apiserver.go index 2080c18c8..3ea75b6b2 100644 --- a/pkg/monitoring/v1alpha2/routes.go +++ b/cmd/ks-iam/apiserver.go @@ -15,12 +15,20 @@ limitations under the License. 
*/ -package v1alpha2 +package main import ( - "kubesphere.io/kubesphere/pkg/monitoring/v1alpha2/monitoring" + "kubesphere.io/kubesphere/cmd/ks-iam/app" + "log" + // Install apis + _ "kubesphere.io/kubesphere/pkg/apis/iam/install" ) -func init() { - addToWebServiceFuncs = append(addToWebServiceFuncs, monitoring.Route) +func main() { + + cmd := app.NewAPIServerCommand() + + if err := cmd.Execute(); err != nil { + log.Fatalln(err) + } } diff --git a/cmd/ks-iam/app/server.go b/cmd/ks-iam/app/server.go new file mode 100644 index 000000000..14a8f3f4f --- /dev/null +++ b/cmd/ks-iam/app/server.go @@ -0,0 +1,95 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package app + +import ( + goflag "flag" + "fmt" + "github.com/golang/glog" + "github.com/spf13/cobra" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" + "kubesphere.io/kubesphere/pkg/filter" + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/models/iam" + "kubesphere.io/kubesphere/pkg/options" + "kubesphere.io/kubesphere/pkg/signals" + "log" + "net/http" +) + +func NewAPIServerCommand() *cobra.Command { + s := options.SharedOptions + + cmd := &cobra.Command{ + Use: "ks-iam", + Long: `The KubeSphere API server validates and configures data +for the api objects. 
The API Server services REST operations and provides the frontend to the +cluster's shared state through which all other components interact.`, + RunE: func(cmd *cobra.Command, args []string) error { + return Run(s) + }, + } + cmd.Flags().AddFlagSet(s.CommandLine) + cmd.Flags().AddGoFlagSet(goflag.CommandLine) + glog.CopyStandardLogTo("INFO") + return cmd +} + +func Run(s *options.ServerRunOptions) error { + + var err error + + err = iam.DatabaseInit() + + if err != nil { + return err + } + + waitForResourceSync() + + container := runtime.Container + container.Filter(filter.Logging) + + log.Printf("Server listening on %d.", s.InsecurePort) + + if s.InsecurePort != 0 { + err = http.ListenAndServe(fmt.Sprintf("%s:%d", s.BindAddress, s.InsecurePort), container) + } + + if s.SecurePort != 0 && len(s.TlsCertFile) > 0 && len(s.TlsPrivateKey) > 0 { + err = http.ListenAndServeTLS(fmt.Sprintf("%s:%d", s.BindAddress, s.SecurePort), s.TlsCertFile, s.TlsPrivateKey, container) + } + + return err +} + +func waitForResourceSync() { + stopChan := signals.SetupSignalHandler() + + informerFactory := informers.SharedInformerFactory() + informerFactory.Rbac().V1().Roles().Lister() + informerFactory.Rbac().V1().RoleBindings().Lister() + informerFactory.Rbac().V1().ClusterRoles().Lister() + informerFactory.Rbac().V1().ClusterRoleBindings().Lister() + + informerFactory.Core().V1().Namespaces().Lister() + + informerFactory.Start(stopChan) + informerFactory.WaitForCacheSync(stopChan) + log.Println("resources sync success") +} diff --git a/hack/docker_build.sh b/hack/docker_build.sh new file mode 100755 index 000000000..0832bdfa5 --- /dev/null +++ b/hack/docker_build.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + + docker build -f build/ks-apigateway/Dockerfile -t kubespheredev/ks-apigateway:latest . + docker build -f build/ks-apiserver/Dockerfile -t kubespheredev/ks-apiserver:latest . 
\ No newline at end of file diff --git a/hack/docker_push.sh b/hack/docker_push.sh index 8dbd17fd0..f1de91ff8 100755 --- a/hack/docker_push.sh +++ b/hack/docker_push.sh @@ -3,4 +3,5 @@ # Push image to dockerhub, need to support multiple push echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin -docker push kubespheredev/ks-apiserver:latest \ No newline at end of file +docker push kubespheredev/ks-apigateway:latest +docker push kubespheredev/ks-apiserver:latest diff --git a/pkg/apigateway/caddy-plugin/authenticate/authenticate.go b/pkg/apigateway/caddy-plugin/authenticate/authenticate.go new file mode 100644 index 000000000..2d00d6196 --- /dev/null +++ b/pkg/apigateway/caddy-plugin/authenticate/authenticate.go @@ -0,0 +1,172 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package authenticate + +import ( + "errors" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/dgrijalva/jwt-go" + "github.com/mholt/caddy/caddyhttp/httpserver" +) + +type Auth struct { + Rule Rule + Next httpserver.Handler +} + +type Rule struct { + Secret []byte + Path string + ExceptedPath []string +} + +type User struct { + Username string `json:"username"` + UID string `json:"uid"` + Groups *[]string `json:"groups,omitempty"` + Extra *map[string]interface{} `json:"extra,omitempty"` +} + +func (h Auth) ServeHTTP(resp http.ResponseWriter, req *http.Request) (int, error) { + for _, path := range h.Rule.ExceptedPath { + if httpserver.Path(req.URL.Path).Matches(path) { + return h.Next.ServeHTTP(resp, req) + } + } + + if httpserver.Path(req.URL.Path).Matches(h.Rule.Path) { + + uToken, err := h.ExtractToken(req) + + if err != nil { + return h.HandleUnauthorized(resp, err), nil + } + + token, err := h.Validate(uToken) + + if err != nil { + return h.HandleUnauthorized(resp, err), nil + } + + req, err = h.InjectContext(req, token) + + if err != nil { + return h.HandleUnauthorized(resp, err), nil + } + } + + return h.Next.ServeHTTP(resp, req) +} + +func (h Auth) InjectContext(req *http.Request, token *jwt.Token) (*http.Request, error) { + + payLoad, ok := token.Claims.(jwt.MapClaims) + + if !ok { + return nil, errors.New("invalid payload") + } + + for header := range req.Header { + if strings.HasPrefix(header, "X-Token-") { + req.Header.Del(header) + } + } + + username, ok := payLoad["username"].(string) + + if ok && username != "" { + req.Header.Set("X-Token-Username", username) + } + + uid := payLoad["uid"] + + if uid != nil { + switch uid.(type) { + case int: + req.Header.Set("X-Token-UID", strconv.Itoa(uid.(int))) + break + case string: + req.Header.Set("X-Token-UID", uid.(string)) + break + } + } + + groups, ok := payLoad["groups"].([]string) + + if ok && len(groups) > 0 { + req.Header.Set("X-Token-Groups", strings.Join(groups, ",")) + } + + return 
req, nil +} + +func (h Auth) Validate(uToken string) (*jwt.Token, error) { + + if len(uToken) == 0 { + return nil, fmt.Errorf("token length is zero") + } + + token, err := jwt.Parse(uToken, h.ProvideKey) + + if err != nil { + return nil, err + } + + return token, nil +} + +func (h Auth) HandleUnauthorized(w http.ResponseWriter, err error) int { + message := fmt.Sprintf("Unauthorized,%v", err) + w.Header().Add("WWW-Authenticate", message) + return http.StatusUnauthorized +} + +func (h Auth) ExtractToken(r *http.Request) (string, error) { + + jwtHeader := strings.Split(r.Header.Get("Authorization"), " ") + + if jwtHeader[0] == "Bearer" && len(jwtHeader) == 2 { + return jwtHeader[1], nil + } + + jwtCookie, err := r.Cookie("token") + + if err == nil { + return jwtCookie.Value, nil + } + + jwtQuery := r.URL.Query().Get("token") + + if jwtQuery != "" { + return jwtQuery, nil + } + + return "", fmt.Errorf("no token found") +} + +func (h Auth) ProvideKey(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); ok { + return h.Rule.Secret, nil + } else { + return nil, fmt.Errorf("expect token signed with HMAC but got %v", token.Header["alg"]) + } +} diff --git a/pkg/apigateway/caddy-plugin/authenticate/auto_load.go b/pkg/apigateway/caddy-plugin/authenticate/auto_load.go new file mode 100644 index 000000000..498fc2de1 --- /dev/null +++ b/pkg/apigateway/caddy-plugin/authenticate/auto_load.go @@ -0,0 +1,110 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + +*/ +package authenticate + +import ( + "fmt" + "strings" + + "github.com/mholt/caddy" + "github.com/mholt/caddy/caddyhttp/httpserver" +) + +func init() { + caddy.RegisterPlugin("authenticate", caddy.Plugin{ + ServerType: "http", + Action: Setup, + }) +} + +func Setup(c *caddy.Controller) error { + + rule, err := parse(c) + + if err != nil { + return err + } + + c.OnStartup(func() error { + fmt.Println("Authenticate middleware is initiated") + return nil + }) + + httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { + return &Auth{Next: next, Rule: rule} + }) + + return nil +} +func parse(c *caddy.Controller) (Rule, error) { + + rule := Rule{ExceptedPath: make([]string, 0)} + + if c.Next() { + args := c.RemainingArgs() + switch len(args) { + case 0: + for c.NextBlock() { + switch c.Val() { + case "path": + if !c.NextArg() { + return rule, c.ArgErr() + } + + rule.Path = c.Val() + + if c.NextArg() { + return rule, c.ArgErr() + } + case "secret": + if !c.NextArg() { + return rule, c.ArgErr() + } + + rule.Secret = []byte(c.Val()) + + if c.NextArg() { + return rule, c.ArgErr() + } + case "except": + if !c.NextArg() { + return rule, c.ArgErr() + } + + rule.ExceptedPath = strings.Split(c.Val(), ",") + + for i := 0; i < len(rule.ExceptedPath); i++ { + rule.ExceptedPath[i] = strings.TrimSpace(rule.ExceptedPath[i]) + } + + if c.NextArg() { + return rule, c.ArgErr() + } + } + } + default: + return rule, c.ArgErr() + } + } + + if c.Next() { + return rule, c.ArgErr() + } + + return rule, nil +} diff --git a/pkg/apigateway/caddy-plugin/authentication/authentication.go b/pkg/apigateway/caddy-plugin/authentication/authentication.go new file mode 100644 index 000000000..6146c4c01 --- /dev/null +++ b/pkg/apigateway/caddy-plugin/authentication/authentication.go @@ -0,0 +1,300 @@ +/* + + Copyright 2019 The KubeSphere Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package authentication + +import ( + "errors" + "fmt" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/endpoints/request" + "net/http" + "strings" + + "github.com/mholt/caddy/caddyhttp/httpserver" + "k8s.io/api/rbac/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/util/slice" + "kubesphere.io/kubesphere/pkg/informers" + sliceutils "kubesphere.io/kubesphere/pkg/utils" +) + +type Authentication struct { + Rule Rule + Next httpserver.Handler +} + +type Rule struct { + Path string + ExceptedPath []string +} + +func (c Authentication) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { + + if httpserver.Path(r.URL.Path).Matches(c.Rule.Path) { + + for _, path := range c.Rule.ExceptedPath { + if httpserver.Path(r.URL.Path).Matches(path) { + return c.Next.ServeHTTP(w, r) + } + } + + attrs, err := getAuthorizerAttributes(r.Context()) + + if err != nil { + return http.StatusInternalServerError, err + } + + permitted, err := permissionValidate(attrs) + + if err != nil { + return http.StatusInternalServerError, err + } + + if !permitted { + err = k8serr.NewForbidden(schema.GroupResource{Group: attrs.GetAPIGroup(), Resource: attrs.GetResource()}, attrs.GetName(), fmt.Errorf("permission undefined")) + return handleForbidden(w, err), nil + } + } + + return c.Next.ServeHTTP(w, r) + +} + +func 
handleForbidden(w http.ResponseWriter, err error) int { + message := fmt.Sprintf("Forbidden,%s", err.Error()) + w.Header().Add("WWW-Authenticate", message) + return http.StatusForbidden +} + +func permissionValidate(attrs authorizer.Attributes) (bool, error) { + + permitted, err := clusterRoleValidate(attrs) + + if err != nil { + return false, err + } + + if permitted { + return true, nil + } + + if attrs.GetNamespace() != "" { + permitted, err = roleValidate(attrs) + + if err != nil { + return false, err + } + + if permitted { + return true, nil + } + } + + return false, nil +} + +func roleValidate(attrs authorizer.Attributes) (bool, error) { + roleBindingLister := informers.SharedInformerFactory().Rbac().V1().RoleBindings().Lister() + roleLister := informers.SharedInformerFactory().Rbac().V1().Roles().Lister() + roleBindings, err := roleBindingLister.RoleBindings(attrs.GetNamespace()).List(labels.Everything()) + + if err != nil { + return false, err + } + + fullSource := attrs.GetResource() + + if attrs.GetSubresource() != "" { + fullSource = fullSource + "/" + attrs.GetSubresource() + } + + for _, roleBinding := range roleBindings { + + for _, subj := range roleBinding.Subjects { + + if (subj.Kind == v1.UserKind && subj.Name == attrs.GetUser().GetName()) || + (subj.Kind == v1.GroupKind && slice.ContainsString(attrs.GetUser().GetGroups(), subj.Name, nil)) { + role, err := roleLister.Roles(attrs.GetNamespace()).Get(roleBinding.RoleRef.Name) + + if err != nil { + return false, err + } + + for _, rule := range role.Rules { + if ruleMatchesRequest(rule, attrs.GetAPIGroup(), "", attrs.GetResource(), attrs.GetSubresource(), attrs.GetName(), attrs.GetVerb()) { + return true, nil + } + } + } + } + } + + return false, nil +} + +func clusterRoleValidate(attrs authorizer.Attributes) (bool, error) { + clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() + clusterRoleBindings, err := 
clusterRoleBindingLister.List(labels.Everything()) + clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() + if err != nil { + return false, err + } + + for _, clusterRoleBinding := range clusterRoleBindings { + + for _, subject := range clusterRoleBinding.Subjects { + + if (subject.Kind == v1.UserKind && subject.Name == attrs.GetUser().GetName()) || + (subject.Kind == v1.GroupKind && sliceutils.HasString(attrs.GetUser().GetGroups(), subject.Name)) { + + clusterRole, err := clusterRoleLister.Get(clusterRoleBinding.RoleRef.Name) + + if err != nil { + return false, err + } + + for _, rule := range clusterRole.Rules { + if attrs.IsResourceRequest() { + if ruleMatchesRequest(rule, attrs.GetAPIGroup(), "", attrs.GetResource(), attrs.GetSubresource(), attrs.GetName(), attrs.GetVerb()) { + return true, nil + } + } else { + if ruleMatchesRequest(rule, "", attrs.GetPath(), "", "", "", attrs.GetVerb()) { + return true, nil + } + } + + } + + } + } + } + + return false, nil +} + +func ruleMatchesResources(rule v1.PolicyRule, apiGroup string, resource string, subresource string, resourceName string) bool { + + if resource == "" { + return false + } + + if !sliceutils.HasString(rule.APIGroups, apiGroup) && !sliceutils.HasString(rule.APIGroups, v1.ResourceAll) { + return false + } + + if len(rule.ResourceNames) > 0 && !sliceutils.HasString(rule.ResourceNames, resourceName) { + return false + } + + combinedResource := resource + + if subresource != "" { + combinedResource = combinedResource + "/" + subresource + } + + for _, res := range rule.Resources { + + // match "*" + if res == v1.ResourceAll || res == combinedResource { + return true + } + + // match "*/subresource" + if len(subresource) > 0 && strings.HasPrefix(res, "*/") && subresource == strings.TrimLeft(res, "*/") { + return true + } + // match "resource/*" + if strings.HasSuffix(res, "/*") && resource == strings.TrimRight(res, "/*") { + return true + } + } + + return false +} + +func 
ruleMatchesRequest(rule v1.PolicyRule, apiGroup string, nonResourceURL string, resource string, subresource string, resourceName string, verb string) bool { + + if !sliceutils.HasString(rule.Verbs, verb) && !sliceutils.HasString(rule.Verbs, v1.VerbAll) { + return false + } + + if nonResourceURL == "" { + return ruleMatchesResources(rule, apiGroup, resource, subresource, resourceName) + } else { + return ruleMatchesNonResource(rule, nonResourceURL) + } +} + +func ruleMatchesNonResource(rule v1.PolicyRule, nonResourceURL string) bool { + + if nonResourceURL == "" { + return false + } + + for _, spec := range rule.NonResourceURLs { + if pathMatches(nonResourceURL, spec) { + return true + } + } + + return false +} + +func pathMatches(path, spec string) bool { + if spec == "*" { + return true + } + if spec == path { + return true + } + if strings.HasSuffix(spec, "*") && strings.HasPrefix(path, strings.TrimRight(spec, "*")) { + return true + } + return false +} + +func getAuthorizerAttributes(ctx request.Context) (authorizer.Attributes, error) { + attribs := authorizer.AttributesRecord{} + + user, ok := request.UserFrom(ctx) + if ok { + attribs.User = user + } + + requestInfo, found := request.RequestInfoFrom(ctx) + if !found { + return nil, errors.New("no RequestInfo found in the context") + } + + // Start with common attributes that apply to resource and non-resource requests + attribs.ResourceRequest = requestInfo.IsResourceRequest + attribs.Path = requestInfo.Path + attribs.Verb = requestInfo.Verb + + attribs.APIGroup = requestInfo.APIGroup + attribs.APIVersion = requestInfo.APIVersion + attribs.Resource = requestInfo.Resource + attribs.Subresource = requestInfo.Subresource + attribs.Namespace = requestInfo.Namespace + attribs.Name = requestInfo.Name + + return &attribs, nil +} diff --git a/pkg/apigateway/caddy-plugin/authentication/auto_load.go b/pkg/apigateway/caddy-plugin/authentication/auto_load.go new file mode 100644 index 000000000..48ef31766 --- /dev/null +++ 
b/pkg/apigateway/caddy-plugin/authentication/auto_load.go @@ -0,0 +1,119 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package authentication + +import ( + "fmt" + "strings" + + "github.com/mholt/caddy" + "github.com/mholt/caddy/caddyhttp/httpserver" + + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/signals" +) + +func init() { + caddy.RegisterPlugin("authentication", caddy.Plugin{ + ServerType: "http", + Action: Setup, + }) +} + +// Setup is called by Caddy to parse the config block +func Setup(c *caddy.Controller) error { + + rule, err := parse(c) + + if err != nil { + return err + } + + if err != nil { + return err + } + + c.OnStartup(func() error { + stopChan := signals.SetupSignalHandler() + informers.SharedInformerFactory().Start(stopChan) + informers.SharedInformerFactory().WaitForCacheSync(stopChan) + fmt.Println("Authentication middleware is initiated") + return nil + }) + + httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { + return &Authentication{Next: next, Rule: rule} + }) + return nil +} + +func parse(c *caddy.Controller) (Rule, error) { + + rule := Rule{ExceptedPath: make([]string, 0)} + + if c.Next() { + args := c.RemainingArgs() + switch len(args) { + case 0: + for c.NextBlock() { + switch c.Val() { + case "path": + if !c.NextArg() { + return rule, c.ArgErr() + } + + rule.Path = c.Val() + + if c.NextArg() { + return rule, c.ArgErr() + } + + 
break + case "except": + if !c.NextArg() { + return rule, c.ArgErr() + } + + rule.ExceptedPath = strings.Split(c.Val(), ",") + + for i := 0; i < len(rule.ExceptedPath); i++ { + rule.ExceptedPath[i] = strings.TrimSpace(rule.ExceptedPath[i]) + } + + if c.NextArg() { + return rule, c.ArgErr() + } + break + } + } + case 1: + rule.Path = args[0] + if c.NextBlock() { + return rule, c.ArgErr() + } + default: + return rule, c.ArgErr() + } + } + + if c.Next() { + return rule, c.ArgErr() + } + + return rule, nil +} diff --git a/pkg/apigateway/caddy-plugin/swagger/auto_load.go b/pkg/apigateway/caddy-plugin/swagger/auto_load.go new file mode 100644 index 000000000..2991e550d --- /dev/null +++ b/pkg/apigateway/caddy-plugin/swagger/auto_load.go @@ -0,0 +1,100 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package authenticate + +import ( + "fmt" + "net/http" + + "github.com/mholt/caddy" + "github.com/mholt/caddy/caddyhttp/httpserver" +) + +func init() { + caddy.RegisterPlugin("swagger", caddy.Plugin{ + ServerType: "http", + Action: Setup, + }) +} + +func Setup(c *caddy.Controller) error { + + handler, err := parse(c) + + if err != nil { + return err + } + + c.OnStartup(func() error { + fmt.Println("Swagger middleware is initiated") + return nil + }) + + httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { + return &Swagger{Next: next, Handler: handler} + }) + + return nil +} +func parse(c *caddy.Controller) (Handler, error) { + + handler := Handler{URL: "/swagger-ui", FilePath: "/var/static/swagger-ui"} + + if c.Next() { + args := c.RemainingArgs() + switch len(args) { + case 0: + for c.NextBlock() { + switch c.Val() { + case "url": + if !c.NextArg() { + return handler, c.ArgErr() + } + + handler.URL = c.Val() + + if c.NextArg() { + return handler, c.ArgErr() + } + case "filePath": + if !c.NextArg() { + return handler, c.ArgErr() + } + + handler.FilePath = c.Val() + + if c.NextArg() { + return handler, c.ArgErr() + } + default: + return handler, c.ArgErr() + } + } + default: + return handler, c.ArgErr() + } + } + + if c.Next() { + return handler, c.ArgErr() + } + + handler.Handler = http.StripPrefix(handler.URL, http.FileServer(http.Dir(handler.FilePath))) + + return handler, nil +} diff --git a/pkg/apigateway/caddy-plugin/swagger/swagger.go b/pkg/apigateway/caddy-plugin/swagger/swagger.go new file mode 100644 index 000000000..6c847a447 --- /dev/null +++ b/pkg/apigateway/caddy-plugin/swagger/swagger.go @@ -0,0 +1,45 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package authenticate + +import ( + "net/http" + + "github.com/mholt/caddy/caddyhttp/httpserver" +) + +type Swagger struct { + Handler Handler + Next httpserver.Handler +} + +type Handler struct { + URL string + FilePath string + Handler http.Handler +} + +func (h Swagger) ServeHTTP(resp http.ResponseWriter, req *http.Request) (int, error) { + + if httpserver.Path(req.URL.Path).Matches(h.Handler.URL) { + h.Handler.Handler.ServeHTTP(resp, req) + return http.StatusOK, nil + } + + return h.Next.ServeHTTP(resp, req) +} diff --git a/pkg/monitoring/monitoring.go b/pkg/apis/iam/install/install.go similarity index 66% rename from pkg/monitoring/monitoring.go rename to pkg/apis/iam/install/install.go index 01c698a91..984a97523 100644 --- a/pkg/monitoring/monitoring.go +++ b/pkg/apis/iam/install/install.go @@ -15,17 +15,19 @@ limitations under the License. 
*/ -package monitoring +package install import ( "github.com/emicklei/go-restful" - - "kubesphere.io/kubesphere/pkg/monitoring/v1alpha2" + urlruntime "k8s.io/apimachinery/pkg/util/runtime" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" ) -const apiGroup = "monitoring.kubesphere.io" - -func AddToContainer(container *restful.Container) error { - container.Add(v1alpha2.WebService(apiGroup)) - return nil +func init() { + Install(runtime.Container) +} + +func Install(container *restful.Container) { + urlruntime.Must(iamv1alpha2.AddToContainer(container)) } diff --git a/pkg/apis/iam/v1alpha2/register.go b/pkg/apis/iam/v1alpha2/register.go new file mode 100644 index 000000000..0e3f485dc --- /dev/null +++ b/pkg/apis/iam/v1alpha2/register.go @@ -0,0 +1,214 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package v1alpha2 + +import ( + "github.com/emicklei/go-restful" + "github.com/emicklei/go-restful-openapi" + "k8s.io/apimachinery/pkg/runtime/schema" + "kubesphere.io/kubesphere/pkg/apiserver/iam" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" + "kubesphere.io/kubesphere/pkg/errors" + "kubesphere.io/kubesphere/pkg/models" +) + +const GroupName = "iam.kubesphere.io" + +var GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} + +var ( + WebServiceBuilder = runtime.NewContainerBuilder(addWebService) + AddToContainer = WebServiceBuilder.AddToContainer +) + +func addWebService(c *restful.Container) error { + tags := []string{"IAM"} + ws := runtime.NewWebService(GroupVersion) + + ws.Route(ws.POST("/authenticate"). + To(iam.TokenReviewHandler). + Doc("Token review"). + Reads(iam.TokenReview{}). + Writes(iam.TokenReview{}). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.POST("/login"). + To(iam.LoginHandler). + Doc("User login"). + Reads(iam.LoginRequest{}). + Writes(models.Token{}). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/users/{name}"). + To(iam.UserDetail). + Doc("User detail"). + Writes(models.User{}). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.POST("/users"). + To(iam.CreateUser). + Reads(models.User{}). + Writes(errors.Error{}). + Doc("Create user"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.DELETE("/users/{name}"). + To(iam.DeleteUser). + Doc("Delete user"). + Writes(errors.Error{}). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.PUT("/users/{name}"). + To(iam.UpdateUser). + Reads(models.User{}). + Writes(errors.Error{}). + Doc("Update user"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/users/{name}/log"). + To(iam.UserLoginLog). + Doc("User login log"). + Writes([]map[string]string{}). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/users"). + To(iam.UserList). + Doc("User list"). 
+ Writes(models.PageableResponse{}). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/groups"). + To(iam.RootGroupList). + Writes([]models.Group{}). + Doc("User group list"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/groups/{path}"). + To(iam.GroupDetail). + Doc("User group detail"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/groups/{path}/users"). + To(iam.GroupUsers). + Doc("Group user list"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.POST("/groups"). + To(iam.CreateGroup). + Reads(models.Group{}). + Doc("Create user group"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.DELETE("/groups/{path}"). + To(iam.DeleteGroup). + Doc("Delete user group"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.PUT("/groups/{path}"). + To(iam.UpdateGroup). + Doc("Update user group"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/users/{username}/roles"). + To(iam.UserRoles). + Doc("Get user role list"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/namespaces/{namespace}/roles/{role}/users"). + To(iam.RoleUsers). + Doc("Get user list by role"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/namespaces/{namespace}/roles/{role}/rules"). + To(iam.RoleRules). + Doc("Get role detail"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/namespaces/{namespace}/users"). + To(iam.NamespaceUsers). + Doc("Get user list by namespace"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/clusterroles/{clusterrole}/users"). + To(iam.ClusterRoleUsers). + Doc("Get user list by cluster role"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/clusterroles/{clusterrole}/rules"). + To(iam.ClusterRoleRules). + Doc("Get cluster role detail"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/rulesmapping/clusterroles"). + To(iam.ClusterRulesMappingHandler). 
+ Doc("Get cluster role policy rules mapping"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/rulesmapping/roles"). + To(iam.RulesMappingHandler). + Doc("Get role policy rules mapping"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/workspaces/{workspace}/rules"). + To(iam.WorkspaceRulesHandler). + Doc("Get workspace level policy rules"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/workspaces/{workspace}/members"). + To(iam.WorkspaceMemberList). + Doc("Get workspace member list"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/namespaces/{namespace}/rules"). + To(iam.NamespacesRulesHandler). + Doc("Get namespace level policy rules"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/devops/{devops}/rules"). + To(iam.DevopsRulesHandler). + Doc("Get devops project level policy rules"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + tags = []string{"Workspace"} + + ws.Route(ws.GET("/workspaces"). + To(iam.UserWorkspaceListHandler). + Doc("Get workspace list"). + Metadata(restfulspec.KeyOpenAPITags, tags). + Writes([]models.Workspace{})) + ws.Route(ws.POST("/workspaces"). + To(iam.WorkspaceCreateHandler). + Doc("Create workspace"). + Metadata(restfulspec.KeyOpenAPITags, tags). + Writes(models.Workspace{})) + ws.Route(ws.DELETE("/workspaces/{name}"). + To(iam.DeleteWorkspaceHandler). + Doc("Delete workspace"). + Metadata(restfulspec.KeyOpenAPITags, tags). + Writes(errors.Error{})) + ws.Route(ws.GET("/workspaces/{name}"). + To(iam.WorkspaceDetailHandler). + Doc("Get workspace detail"). + Metadata(restfulspec.KeyOpenAPITags, tags). + Writes(models.Workspace{})) + ws.Route(ws.PUT("/workspaces/{name}"). + To(iam.WorkspaceEditHandler). + Doc("Update workspace"). + Metadata(restfulspec.KeyOpenAPITags, tags). + Writes(models.Workspace{})) + + ws.Route(ws.GET("/workspaces/{name}/members/{member}"). + To(iam.WorkspaceMemberDetail). + Doc("Get workspace member detail"). 
+ Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/workspaces/{name}/roles"). + To(iam.WorkspaceRoles). + Doc("Get workspace roles"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.POST("/workspaces/{name}/members"). + To(iam.WorkspaceMemberInvite). + Doc("Add user to workspace"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.DELETE("/workspaces/{name}/members"). + To(iam.WorkspaceMemberRemove). + Doc("Delete user from workspace"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + c.Add(ws) + return nil +} diff --git a/pkg/apis/metrics/group.go b/pkg/apis/metrics/group.go deleted file mode 100644 index b2c090fbb..000000000 --- a/pkg/apis/metrics/group.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- -*/ -package metrics diff --git a/pkg/apis/metrics/v1alpha2/register.go b/pkg/apis/metrics/v1alpha2/register.go index a0e2a6150..53e628797 100644 --- a/pkg/apis/metrics/v1alpha2/register.go +++ b/pkg/apis/metrics/v1alpha2/register.go @@ -19,6 +19,7 @@ package v1alpha2 import ( "github.com/emicklei/go-restful" + "github.com/emicklei/go-restful-openapi" "k8s.io/apimachinery/pkg/runtime/schema" "kubesphere.io/kubesphere/pkg/apiserver/metrics" "kubesphere.io/kubesphere/pkg/apiserver/runtime" @@ -36,8 +37,20 @@ var ( func addWebService(c *restful.Container) error { webservice := runtime.NewWebService(GroupVersion) - webservice.Route(webservice.GET("/storageclasses/{storageclass}").To(metrics.GetScMetrics)) - webservice.Route(webservice.GET("/metrics/storageclass").To(metrics.GetScMetricsList)) + tags := []string{"metrics"} + + webservice.Route(webservice.GET("/storageclasses/{storageclass}"). + To(metrics.GetScMetrics). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc(""). + Param(webservice.PathParameter("storageclass", "storageclass's name")). + Writes(metrics.ScMetricsItem{})) + + webservice.Route(webservice.GET("/storageclasses"). + To(metrics.GetScMetricsList). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc(""). + Writes([]metrics.ScMetricsItem{})) c.Add(webservice) diff --git a/pkg/monitoring/v1alpha2/webservice.go b/pkg/apis/monitoring/install/install.go similarity index 63% rename from pkg/monitoring/v1alpha2/webservice.go rename to pkg/apis/monitoring/install/install.go index 07c185d0e..4d9ce94ad 100644 --- a/pkg/monitoring/v1alpha2/webservice.go +++ b/pkg/apis/monitoring/install/install.go @@ -15,22 +15,19 @@ limitations under the License. 
*/ -package v1alpha2 +package install import ( "github.com/emicklei/go-restful" + urlruntime "k8s.io/apimachinery/pkg/util/runtime" + monitoringv1alpha2 "kubesphere.io/kubesphere/pkg/apis/monitoring/v1alpha2" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" ) -const apiVersion = "v1alpha2" - -var addToWebServiceFuncs []func(ws *restful.WebService) - -func WebService(apiGroup string) *restful.WebService { - ws := new(restful.WebService) - ws.Path("/apis/" + apiGroup + "/" + apiVersion). - Produces(restful.MIME_JSON).Consumes(restful.MIME_JSON) - for _, f := range addToWebServiceFuncs { - f(ws) - } - return ws +func init() { + Install(runtime.Container) +} + +func Install(container *restful.Container) { + urlruntime.Must(monitoringv1alpha2.AddToContainer(container)) } diff --git a/pkg/apis/monitoring/v1alpha2/register.go b/pkg/apis/monitoring/v1alpha2/register.go new file mode 100644 index 000000000..ae0ca055f --- /dev/null +++ b/pkg/apis/monitoring/v1alpha2/register.go @@ -0,0 +1,207 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package v1alpha2 + +import ( + "github.com/emicklei/go-restful" + "github.com/emicklei/go-restful-openapi" + "k8s.io/apimachinery/pkg/runtime/schema" + "kubesphere.io/kubesphere/pkg/apiserver/monitoring" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" +) + +const GroupName = "monitoring.kubesphere.io" + +var GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} + +var ( + WebServiceBuilder = runtime.NewContainerBuilder(addWebService) + AddToContainer = WebServiceBuilder.AddToContainer +) + +func addWebService(c *restful.Container) error { + ws := runtime.NewWebService(GroupVersion) + + tags := []string{"Monitoring"} + + ws.Route(ws.GET("/clusters").To(monitoring.MonitorCluster). + Doc("monitor cluster level metrics"). + Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("cluster_cpu_utilisation")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/nodes").To(monitoring.MonitorNode). + Doc("monitor nodes level metrics"). + Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("node_cpu_utilisation")). + Param(ws.QueryParameter("nodes_filter", "node re2 expression filter").Required(false).DefaultValue("")). + Param(ws.QueryParameter("sort_metric", "sort metric").Required(false)). + Param(ws.QueryParameter("sort_type", "ascending descending order").Required(false)). + Param(ws.QueryParameter("page", "page number").Required(false).DefaultValue("1")). + Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("4")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/nodes/{node}").To(monitoring.MonitorNode). + Doc("monitor specific node level metrics"). + Param(ws.PathParameter("node", "specific node").Required(true).DefaultValue("")). 
+ Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").Required(true).DefaultValue("node_cpu_utilisation")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/namespaces").To(monitoring.MonitorNamespace). + Doc("monitor namespaces level metrics"). + Param(ws.QueryParameter("namespaces_filter", "namespaces re2 expression filter").Required(false).DefaultValue("")). + Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("namespace_memory_utilisation")). + Param(ws.QueryParameter("sort_metric", "sort metric").Required(false)). + Param(ws.QueryParameter("sort_type", "ascending descending order").Required(false)). + Param(ws.QueryParameter("page", "page number").Required(false).DefaultValue("1")). + Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("4")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/namespaces/{namespace}").To(monitoring.MonitorNamespace). + Doc("monitor specific namespace level metrics"). + Param(ws.PathParameter("namespace", "specific namespace").Required(true).DefaultValue("monitoring")). + Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").Required(true).DefaultValue("namespace_memory_utilisation")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/namespaces/{namespace}/pods").To(monitoring.MonitorPod). + Doc("monitor pods level metrics"). + Param(ws.PathParameter("namespace", "specific namespace").Required(true).DefaultValue("monitoring")). + Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("pod_memory_utilisation_wo_cache")). + Param(ws.QueryParameter("pods_filter", "pod re2 expression filter").Required(false).DefaultValue("")). + Param(ws.QueryParameter("sort_metric", "sort metric").Required(false)). 
+ Param(ws.QueryParameter("sort_type", "ascending descending order").Required(false)). + Param(ws.QueryParameter("page", "page number").Required(false).DefaultValue("1")). + Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("4")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}").To(monitoring.MonitorPod). + Doc("monitor specific pod level metrics"). + Param(ws.PathParameter("namespace", "specific namespace").Required(true).DefaultValue("monitoring")). + Param(ws.PathParameter("pod", "specific pod").Required(true).DefaultValue("")). + Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").Required(true).DefaultValue("pod_memory_utilisation_wo_cache")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/nodes/{node}/pods").To(monitoring.MonitorPod). + Doc("monitor pods level metrics by nodeid"). + Param(ws.PathParameter("node", "specific node").Required(true).DefaultValue("i-k89a62il")). + Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("pod_memory_utilisation_wo_cache")). + Param(ws.QueryParameter("pods_filter", "pod re2 expression filter").Required(false).DefaultValue("openpitrix.*")). + Param(ws.QueryParameter("sort_metric", "sort metric").Required(false)). + Param(ws.QueryParameter("sort_type", "ascending descending order").Required(false)). + Param(ws.QueryParameter("page", "page number").Required(false).DefaultValue("1")). + Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("4")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/nodes/{node}/pods/{pod}").To(monitoring.MonitorPod). + Doc("monitor specific pod level metrics by nodeid"). + Param(ws.PathParameter("node", "specific node").Required(true).DefaultValue("i-k89a62il")). 
+ Param(ws.PathParameter("pod", "specific pod").Required(true).DefaultValue("")). 
+ Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").Required(true).DefaultValue("pod_memory_utilisation_wo_cache")). 
+ Metadata(restfulspec.KeyOpenAPITags, tags)) 
+ 
+ ws.Route(ws.GET("/nodes/{node}/pods/{pod}/containers").To(monitoring.MonitorContainer). 
+ Doc("monitor containers level metrics by nodeid"). 
+ Param(ws.PathParameter("node", "specific node").Required(true)). 
+ Param(ws.PathParameter("pod", "specific pod").Required(true)). 
+ Param(ws.QueryParameter("containers_filter", "container re2 expression filter").Required(false).DefaultValue("")). 
+ Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...").Required(false)). 
+ Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").Required(true).DefaultValue("pod_memory_utilisation_wo_cache")). 
+ Param(ws.QueryParameter("sort_metric", "sort metric").Required(false)). 
+ Param(ws.QueryParameter("sort_type", "ascending descending order").Required(false)). 
+ Param(ws.QueryParameter("page", "page number").Required(false).DefaultValue("1")). 
+ Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("4")). 
+ Param(ws.QueryParameter("type", "rank, statistic").Required(false).DefaultValue("rank")). 
+ Metadata(restfulspec.KeyOpenAPITags, tags)) 
+ 
+ ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}/containers").To(monitoring.MonitorContainer). 
+ Doc("monitor containers level metrics"). 
+ Param(ws.PathParameter("namespace", "specific namespace").Required(true).DefaultValue("monitoring")). 
+ Param(ws.PathParameter("pod", "specific pod").Required(true).DefaultValue("")). 
+ Param(ws.QueryParameter("containers_filter", "container re2 expression filter").Required(false).DefaultValue("")). 
+ Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...").Required(false)). 
+ Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").Required(true).DefaultValue("container_memory_utilisation_wo_cache")). + Param(ws.QueryParameter("sort_metric", "sort metric").Required(false)). + Param(ws.QueryParameter("sort_type", "ascending descending order").Required(false)). + Param(ws.QueryParameter("page", "page number").Required(false).DefaultValue("1")). + Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("4")). + Param(ws.QueryParameter("type", "rank, statistic").Required(false).DefaultValue("rank")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}/containers/{container}").To(monitoring.MonitorContainer). + Doc("monitor specific container level metrics"). + Param(ws.PathParameter("namespace", "specific namespace").Required(true).DefaultValue("monitoring")). + Param(ws.PathParameter("pod", "specific pod").Required(true).DefaultValue("")). + Param(ws.PathParameter("container", "specific container").Required(true).DefaultValue("")). + Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").Required(true).DefaultValue("container_memory_utilisation_wo_cache")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/namespaces/{namespace}/workloads/{workload_kind}").To(monitoring.MonitorWorkload). + Doc("monitor specific workload level metrics"). + Param(ws.PathParameter("namespace", "namespace").Required(true).DefaultValue("kube-system")). + Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...").Required(false)). + Param(ws.PathParameter("workload_kind", "workload kind").Required(false).DefaultValue("daemonset")). + Param(ws.QueryParameter("workload_name", "workload name").Required(true).DefaultValue("")). + Param(ws.QueryParameter("pods_filter", "pod re2 expression filter").Required(false).DefaultValue("openpitrix.*")). 
+ Param(ws.QueryParameter("sort_metric", "sort metric").Required(false)). + Param(ws.QueryParameter("sort_type", "ascending descending order").Required(false)). + Param(ws.QueryParameter("page", "page number").Required(false).DefaultValue("1")). + Param(ws.QueryParameter("limit", "max metric items in a page").Required(false).DefaultValue("4")). + Param(ws.QueryParameter("type", "rank, statistic").Required(false).DefaultValue("rank")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/namespaces/{namespace}/workloads").To(monitoring.MonitorWorkload). + Doc("monitor all workload level metrics"). + Param(ws.PathParameter("namespace", "namespace").Required(true).DefaultValue("kube-system")). + Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...").Required(false)). + Param(ws.QueryParameter("workloads_filter", "pod re2 expression filter").Required(false).DefaultValue("")). + Param(ws.QueryParameter("sort_metric", "sort metric").Required(false)). + Param(ws.QueryParameter("sort_type", "ascending descending order").Required(false)). + Param(ws.QueryParameter("page", "page number").Required(false).DefaultValue("1")). + Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("4")). + Param(ws.QueryParameter("type", "rank, statistic").Required(false).DefaultValue("rank")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + // list all namespace in this workspace by selected metrics + ws.Route(ws.GET("/workspaces/{workspace}").To(monitoring.MonitorOneWorkspace). + Doc("monitor workspaces level metrics"). + Param(ws.PathParameter("workspace", "workspace name").Required(true)). + Param(ws.QueryParameter("namespaces_filter", "namespaces filter").Required(false).DefaultValue("k.*")). + Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("namespace_memory_utilisation_wo_cache")). 
+ Param(ws.QueryParameter("sort_metric", "sort metric").Required(false)). + Param(ws.QueryParameter("sort_type", "ascending descending order").Required(false)). + Param(ws.QueryParameter("page", "page number").Required(false).DefaultValue("1")). + Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("4")). + Param(ws.QueryParameter("type", "rank, statistic").Required(false).DefaultValue("rank")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/workspaces").To(monitoring.MonitorAllWorkspaces). + Doc("monitor workspaces level metrics"). + Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("workspace_memory_utilisation")). + Param(ws.QueryParameter("workspaces_filter", "workspaces re2 expression filter").Required(false).DefaultValue(".*")). + Param(ws.QueryParameter("sort_metric", "sort metric").Required(false)). + Param(ws.QueryParameter("sort_type", "ascending descending order").Required(false)). + Param(ws.QueryParameter("page", "page number").Required(false).DefaultValue("1")). + Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").Required(false).DefaultValue("4")). + Param(ws.QueryParameter("type", "rank, statistic").Required(false).DefaultValue("rank")). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/components").To(monitoring.MonitorComponentStatus). + Doc("monitor k8s components status"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + c.Add(ws) + return nil +} diff --git a/pkg/apis/operations/group.go b/pkg/apis/operations/group.go deleted file mode 100644 index a01cd668a..000000000 --- a/pkg/apis/operations/group.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -*/ -package operations diff --git a/pkg/apis/operations/v1alpha2/register.go b/pkg/apis/operations/v1alpha2/register.go index deded7792..a42ddca9c 100644 --- a/pkg/apis/operations/v1alpha2/register.go +++ b/pkg/apis/operations/v1alpha2/register.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "kubesphere.io/kubesphere/pkg/apiserver/operations" "kubesphere.io/kubesphere/pkg/apiserver/runtime" + "kubesphere.io/kubesphere/pkg/errors" ) const GroupName = "operations.kubesphere.io" @@ -36,20 +37,24 @@ var ( func addWebService(c *restful.Container) error { + tags := []string{"Operations"} + webservice := runtime.NewWebService(GroupVersion) - webservice.Route(webservice.POST("/nodes/{node}/drainage").To(operations.DrainNode)) + webservice.Route(webservice.POST("/nodes/{node}/drainage"). + To(operations.DrainNode). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc(""). + Param(webservice.PathParameter("node", "node name")). + Writes(errors.Error{})) webservice.Route(webservice.POST("/namespaces/{namespace}/jobs/{job}"). To(operations.RerunJob). - Metadata(restfulspec.KeyOpenAPITags, []string{"jobs"}). + Metadata(restfulspec.KeyOpenAPITags, tags). Doc("Handle job operation"). - Param(webservice.PathParameter("job", "job name"). - DataType("string")). - Param(webservice.PathParameter("namespace", "job's namespace"). - DataType("string")). - Param(webservice.QueryParameter("a", "action"). - DataType("string")). + Param(webservice.PathParameter("job", "job name")). + Param(webservice.PathParameter("namespace", "job's namespace")). 
+ Param(webservice.QueryParameter("a", "action")). Writes("")) c.Add(webservice) diff --git a/pkg/apis/resources/group.go b/pkg/apis/resources/group.go deleted file mode 100644 index b2c090fbb..000000000 --- a/pkg/apis/resources/group.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -*/ -package metrics diff --git a/pkg/apis/resources/v1alpha2/register.go b/pkg/apis/resources/v1alpha2/register.go index c7ca9ff59..594247342 100644 --- a/pkg/apis/resources/v1alpha2/register.go +++ b/pkg/apis/resources/v1alpha2/register.go @@ -20,9 +20,20 @@ package v1alpha2 import ( "github.com/emicklei/go-restful" "github.com/emicklei/go-restful-openapi" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "kubesphere.io/kubesphere/pkg/apiserver/components" + "kubesphere.io/kubesphere/pkg/apiserver/quotas" + "kubesphere.io/kubesphere/pkg/apiserver/registries" "kubesphere.io/kubesphere/pkg/apiserver/resources" + "kubesphere.io/kubesphere/pkg/apiserver/revisions" + "kubesphere.io/kubesphere/pkg/apiserver/routers" "kubesphere.io/kubesphere/pkg/apiserver/runtime" + "kubesphere.io/kubesphere/pkg/apiserver/workloadstatuses" + "kubesphere.io/kubesphere/pkg/errors" + "kubesphere.io/kubesphere/pkg/models" + "kubesphere.io/kubesphere/pkg/params" ) const GroupName = "resources.kubesphere.io" @@ -38,18 +49,182 @@ func addWebService(c *restful.Container) error { webservice := 
runtime.NewWebService(GroupVersion) - webservice.Route(webservice.GET("/namespaces/{namespace}/{resources}").To(resources.NamespaceResourceHandler)) + tags := []string{"Namespace resources"} - webservice.Route(webservice.GET("/{resources}").To(resources.ClusterResourceHandler)) + webservice.Route(webservice.GET("/namespaces/{namespace}/{resources}"). + To(resources.NamespaceResourceHandler). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("Namespace level resource query"). + Param(webservice.PathParameter("namespace", "which namespace")). + Param(webservice.PathParameter("resources", "namespace level resource type")). + Param(webservice.QueryParameter(params.ConditionsParam, "query conditions"). + Required(false). + DataFormat("key=%s,key~%s")). + Param(webservice.QueryParameter(params.PagingParam, "page"). + Required(false). + DataFormat("limit=%d,page=%d"). + DefaultValue("limit=10,page=1")). + Writes(models.PageableResponse{})) - webservice.Route(webservice.GET("/storageclasses/{storageclass}/persistentvolumeclaims").To(resources.GetPvcListBySc)) - webservice.Route(webservice.GET("/namespaces/{namespace}/persistentvolumeclaims/{pvc}/pods").To(resources.GetPodListByPvc)) + tags = []string{"Cluster resources"} - tags := []string{"users"} - webservice.Route(webservice.GET("/users/{username}/kubectl").Doc("get user's kubectl pod").Param(webservice.PathParameter("username", - "username").DataType("string")).Metadata(restfulspec.KeyOpenAPITags, tags).To(resources.GetKubectl)) - webservice.Route(webservice.GET("/users/{username}/kubeconfig").Doc("get users' kubeconfig").Param(webservice.PathParameter("username", - "username").DataType("string")).Metadata(restfulspec.KeyOpenAPITags, tags).To(resources.GetKubeconfig)) + webservice.Route(webservice.GET("/{resources}"). + To(resources.ClusterResourceHandler). + Writes(models.PageableResponse{}). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("Cluster level resource query"). 
+ Param(webservice.PathParameter("resources", "cluster level resource type")). 
+ Param(webservice.QueryParameter(params.ConditionsParam, "query conditions"). 
+ Required(false). 
+ DataFormat("key=value,key~value"). 
+ DefaultValue("")). 
+ Param(webservice.QueryParameter(params.PagingParam, "page"). 
+ Required(false). 
+ DataFormat("limit=%d,page=%d"). 
+ DefaultValue("limit=10,page=1"))) 
+ 
+ webservice.Route(webservice.GET("/storageclasses/{storageclass}/persistentvolumeclaims"). 
+ To(resources.GetPvcListBySc). 
+ Doc("get pvc list by storageclass"). 
+ Param(webservice.PathParameter("storageclass", "storageclass's name")). 
+ Metadata(restfulspec.KeyOpenAPITags, tags)) 
+ webservice.Route(webservice.GET("/namespaces/{namespace}/persistentvolumeclaims/{pvc}/pods"). 
+ To(resources.GetPodListByPvc)) 
+ 
+ tags = []string{"User resources"} 
+ 
+ webservice.Route(webservice.GET("/users/{username}/kubectl"). 
+ To(resources.GetKubectl). 
+ Doc("get user's kubectl pod"). 
+ Param(webservice.PathParameter("username", "username")). 
+ Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Writes(models.PodInfo{})) 
+ 
+ webservice.Route(webservice.GET("/users/{username}/kubeconfig"). 
+ To(resources.GetKubeconfig). 
+ Doc("get users' kubeconfig"). 
+ Param(webservice.PathParameter("username", "username")). 
+ Metadata(restfulspec.KeyOpenAPITags, tags)) 
+ 
+ tags = []string{"Components"} 
+ 
+ webservice.Route(webservice.GET("/components"). 
+ To(components.GetComponents). 
+ Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Doc(""). 
+ Writes(map[string]models.Component{})) 
+ webservice.Route(webservice.GET("/components/{component}"). 
+ To(components.GetComponentStatus). 
+ Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Doc(""). 
+ Param(webservice.PathParameter("component", "component name")). 
+ Writes(models.Component{})) 
+ webservice.Route(webservice.GET("/health"). 
+ To(components.GetSystemHealthStatus). 
+ Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Doc(""). 
+ Writes(map[string]int{})) + + tags = []string{"Quotas"} + + webservice.Route(webservice.GET("/quotas"). + To(quotas.GetClusterQuotas). + Doc("get whole cluster's resource usage"). + Writes(models.ResourceQuota{}). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + webservice.Route(webservice.GET("/namespaces/{namespace}/quotas"). + Doc("get specified namespace's resource quota and usage"). + Param(webservice.PathParameter("namespace", "namespace's name")). + Writes(models.ResourceQuota{}). + Metadata(restfulspec.KeyOpenAPITags, tags). + To(quotas.GetNamespaceQuotas)) + + tags = []string{"Registries"} + + webservice.Route(webservice.POST("registries/verify"). + To(registries.RegistryVerify). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("docker registry verify"). + Writes(errors.Error{})) + + tags = []string{"Revision"} + + webservice.Route(webservice.GET("/namespaces/{namespace}/daemonsets/{daemonset}/revisions/{revision}"). + To(revisions.GetDaemonSetRevision). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("Handle daemonset operation"). + Param(webservice.PathParameter("daemonset", "daemonset's name")). + Param(webservice.PathParameter("namespace", "daemonset's namespace")). + Param(webservice.PathParameter("revision", "daemonset's revision")). + Writes(appsv1.DaemonSet{})) + webservice.Route(webservice.GET("/namespaces/{namespace}/deployments/{deployment}/revisions/{revision}"). + To(revisions.GetDeployRevision). + Metadata(restfulspec.KeyOpenAPITags, tags). + Doc("Handle deployment operation"). + Param(webservice.PathParameter("deployment", "deployment's name")). + Param(webservice.PathParameter("namespace", "deployment's namespace")). + Param(webservice.PathParameter("revision", "deployment's revision")). + Writes(appsv1.ReplicaSet{})) + webservice.Route(webservice.GET("/namespaces/{namespace}/statefulsets/{statefulset}/revisions/{revision}"). + To(revisions.GetStatefulSetRevision). + Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Doc("Handle statefulset operation"). 
+ Param(webservice.PathParameter("statefulset", "statefulset's name")). 
+ Param(webservice.PathParameter("namespace", "statefulset's namespace")). 
+ Param(webservice.PathParameter("revision", "statefulset's revision")). 
+ Writes(appsv1.StatefulSet{})) 
+ 
+ tags = []string{"Router"} 
+ 
+ webservice.Route(webservice.GET("/routers"). 
+ To(routers.GetAllRouters). 
+ Doc("Get all routers"). 
+ Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Writes(corev1.Service{})) 
+ 
+ webservice.Route(webservice.GET("/users/{username}/routers"). 
+ To(routers.GetAllRoutersOfUser). 
+ Doc("Get routers for user"). 
+ Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Param(webservice.PathParameter("username", "")). 
+ Writes(corev1.Service{})) 
+ 
+ webservice.Route(webservice.GET("/namespaces/{namespace}/router"). 
+ To(routers.GetRouter). 
+ Doc("Get router of a specified project"). 
+ Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Param(webservice.PathParameter("namespace", "name of the project"))) 
+ 
+ webservice.Route(webservice.DELETE("/namespaces/{namespace}/router"). 
+ To(routers.DeleteRouter). 
+ Doc("Delete router of a specified project"). 
+ Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Param(webservice.PathParameter("namespace", "name of the project"))) 
+ 
+ webservice.Route(webservice.POST("/namespaces/{namespace}/router"). 
+ To(routers.CreateRouter). 
+ Doc("Create a router for a specified project"). 
+ Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Param(webservice.PathParameter("namespace", "name of the project"))) 
+ 
+ webservice.Route(webservice.PUT("/namespaces/{namespace}/router"). 
+ To(routers.UpdateRouter). 
+ Doc("Update a router for a specified project"). 
+ Metadata(restfulspec.KeyOpenAPITags, tags). 
+ Param(webservice.PathParameter("namespace", "name of the project"))) 
+ 
+ tags = []string{"WorkloadStatus"} 
+ 
+ webservice.Route(webservice.GET("/workloadstatuses"). 
+ Doc("get abnormal workloads' count of whole cluster"). 
+ Metadata(restfulspec.KeyOpenAPITags, tags). + To(workloadstatuses.GetClusterResourceStatus)) + webservice.Route(webservice.GET("/namespaces/{namespace}/workloadstatuses"). + Doc("get abnormal workloads' count of specified namespace"). + Param(webservice.PathParameter("namespace", "the name of namespace")). + Metadata(restfulspec.KeyOpenAPITags, tags). + To(workloadstatuses.GetNamespacesResourceStatus)) c.Add(webservice) diff --git a/pkg/apiserver/components/components.go b/pkg/apiserver/components/components.go index 4aefd5fab..21d20b322 100644 --- a/pkg/apiserver/components/components.go +++ b/pkg/apiserver/components/components.go @@ -22,18 +22,14 @@ import ( "github.com/emicklei/go-restful" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/components" + "net/http" ) -func V1Alpha2(ws *restful.WebService) { - ws.Route(ws.GET("/components").To(getComponents)) - ws.Route(ws.GET("/components/{component}").To(getComponentStatus)) - ws.Route(ws.GET("/health").To(getSystemHealthStatus)) -} +func GetSystemHealthStatus(request *restful.Request, response *restful.Response) { + result, err := components.GetSystemHealthStatus() -func getSystemHealthStatus(request *restful.Request, response *restful.Response) { - result, err := components.GetAllComponentsStatus() - - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } @@ -41,12 +37,13 @@ func getSystemHealthStatus(request *restful.Request, response *restful.Response) } // get a specific component status -func getComponentStatus(request *restful.Request, response *restful.Response) { +func GetComponentStatus(request *restful.Request, response *restful.Response) { component := request.PathParameter("component") result, err := components.GetComponentStatus(component) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) 
return } @@ -54,11 +51,12 @@ func getComponentStatus(request *restful.Request, response *restful.Response) { } // get all componentsHandler -func getComponents(request *restful.Request, response *restful.Response) { +func GetComponents(request *restful.Request, response *restful.Response) { result, err := components.GetAllComponentsStatus() - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } diff --git a/pkg/apiserver/hpa/hpa.go b/pkg/apiserver/hpa/hpa.go deleted file mode 100644 index 271003fee..000000000 --- a/pkg/apiserver/hpa/hpa.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -*/ - -package hpa - -import ( - "github.com/emicklei/go-restful" - "github.com/emicklei/go-restful-openapi" - "k8s.io/api/autoscaling/v1" - - "kubesphere.io/kubesphere/pkg/errors" - "kubesphere.io/kubesphere/pkg/models/hpa" -) - -func V1Alpha2(ws *restful.WebService) { - ws.Route(ws.GET("/namespaces/{namespace}/horizontalpodautoscalers/{horizontalpodautoscaler}"). - To(getHpa). - Metadata(restfulspec.KeyOpenAPITags, []string{"hpa"}). - Doc("get horizontalpodautoscalers"). - Param(ws.PathParameter("namespace", "horizontalpodautoscalers's namespace"). - DataType("string")). - Param(ws.PathParameter("horizontalpodautoscaler", "horizontalpodautoscaler's name")). 
- Writes(v1.HorizontalPodAutoscaler{})) -} - -func getHpa(req *restful.Request, resp *restful.Response) { - name := req.PathParameter("horizontalpodautoscaler") - namespace := req.PathParameter("namespace") - - result, err := hpa.GetHPA(namespace, name) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(result) -} diff --git a/pkg/apiserver/iam/am.go b/pkg/apiserver/iam/am.go new file mode 100644 index 000000000..5ca967a13 --- /dev/null +++ b/pkg/apiserver/iam/am.go @@ -0,0 +1,190 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package iam + +import ( + "github.com/emicklei/go-restful" + "k8s.io/api/rbac/v1" + "net/http" + "sort" + + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/errors" + "kubesphere.io/kubesphere/pkg/models/iam" + "kubesphere.io/kubesphere/pkg/models/iam/policy" +) + +type roleList struct { + ClusterRoles []*v1.ClusterRole `json:"clusterRoles" protobuf:"bytes,2,rep,name=clusterRoles"` + Roles []*v1.Role `json:"roles" protobuf:"bytes,2,rep,name=roles"` +} + +func RoleRules(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + roleName := req.PathParameter("role") + + role, err := iam.GetRole(namespace, roleName) + + if err != nil { + resp.WriteError(http.StatusInternalServerError, err) + return + } + + rules, err := iam.GetRoleSimpleRules([]*v1.Role{role}, namespace) + + if err != nil { + resp.WriteError(http.StatusInternalServerError, err) + return + } + + resp.WriteAsJson(rules[namespace]) +} + +func RoleUsers(req *restful.Request, resp *restful.Response) { + roleName := req.PathParameter("role") + namespace := req.PathParameter("namespace") + + users, err := iam.RoleUsers(namespace, roleName) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(users) +} + +func NamespaceUsers(req *restful.Request, resp *restful.Response) { + + namespace := req.PathParameter("namespace") + + users, err := iam.NamespaceUsers(namespace) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + sort.Slice(users, func(i, j int) bool { + return users[i].Username < users[j].Username + }) + + resp.WriteAsJson(users) +} + +func UserRoles(req *restful.Request, resp *restful.Response) { + + username := req.PathParameter("username") + + roles, err := iam.GetRoles(username, "") + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + 
+ clusterRoles, err := iam.GetClusterRoles(username) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + roleList := roleList{} + roleList.Roles = roles + roleList.ClusterRoles = clusterRoles + + resp.WriteAsJson(roleList) +} + +func NamespaceRulesHandler(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + username := req.HeaderParameter(constants.UserNameHeader) + + clusterRoles, err := iam.GetClusterRoles(username) + + if err != nil { + resp.WriteError(http.StatusInternalServerError, err) + return + } + + roles, err := iam.GetRoles(username, namespace) + if err != nil { + resp.WriteError(http.StatusInternalServerError, err) + return + } + + for _, clusterRole := range clusterRoles { + role := new(v1.Role) + role.Name = clusterRole.Name + role.Labels = clusterRole.Labels + role.Namespace = namespace + role.Annotations = clusterRole.Annotations + role.Kind = "Role" + role.Rules = clusterRole.Rules + roles = append(roles, role) + } + + rules, err := iam.GetRoleSimpleRules(roles, namespace) + + if err != nil { + resp.WriteError(http.StatusInternalServerError, err) + return + } + + resp.WriteAsJson(rules[namespace]) +} + +func RulesMappingHandler(req *restful.Request, resp *restful.Response) { + rules := policy.RoleRuleMapping + resp.WriteAsJson(rules) +} + +func ClusterRulesMappingHandler(req *restful.Request, resp *restful.Response) { + rules := policy.ClusterRoleRuleMapping + resp.WriteAsJson(rules) +} + +func ClusterRoleRules(req *restful.Request, resp *restful.Response) { + clusterRoleName := req.PathParameter("clusterrole") + clusterRole, err := iam.GetClusterRole(clusterRoleName) + if err != nil { + resp.WriteError(http.StatusInternalServerError, err) + return + } + rules, err := iam.GetClusterRoleSimpleRules([]*v1.ClusterRole{clusterRole}) + if err != nil { + resp.WriteError(http.StatusInternalServerError, err) + return + } + + resp.WriteAsJson(rules) +} 
+ +func ClusterRoleUsers(req *restful.Request, resp *restful.Response) { + clusterRoleName := req.PathParameter("clusterrole") + + users, err := iam.ClusterRoleUsers(clusterRoleName) + + if err != nil { + resp.WriteError(http.StatusInternalServerError, err) + return + } + + resp.WriteAsJson(users) +} diff --git a/pkg/apiserver/iam/auth.go b/pkg/apiserver/iam/auth.go new file mode 100644 index 000000000..86782847a --- /dev/null +++ b/pkg/apiserver/iam/auth.go @@ -0,0 +1,142 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package iam + +import ( + "fmt" + "github.com/dgrijalva/jwt-go" + "github.com/emicklei/go-restful" + "kubesphere.io/kubesphere/pkg/models" + "net/http" + + "kubesphere.io/kubesphere/pkg/errors" + "kubesphere.io/kubesphere/pkg/models/iam" + "kubesphere.io/kubesphere/pkg/utils" + jwtutils "kubesphere.io/kubesphere/pkg/utils/jwt" +) + +type Spec struct { + Token string `json:"token"` +} + +type Status struct { + Authenticated bool `json:"authenticated"` + User map[string]interface{} `json:"user,omitempty"` +} + +type TokenReview struct { + APIVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Spec *Spec `json:"spec,omitempty"` + Status *Status `json:"status,omitempty"` +} + +type LoginRequest struct { + Username string `json:"username"` + Password string `json:"password"` +} + +const ( + APIVersion = "authentication.k8s.io/v1beta1" + KindTokenReview = "TokenReview" +) + +func LoginHandler(req *restful.Request, resp *restful.Response) { + var loginRequest LoginRequest + + err := req.ReadEntity(&loginRequest) + + if err != nil || loginRequest.Username == "" || loginRequest.Password == "" { + resp.WriteHeaderAndEntity(http.StatusUnauthorized, errors.Wrap(fmt.Errorf("incorrect username or password"))) + return + } + + ip := utils.RemoteIp(req.Request) + + token, err := iam.Login(loginRequest.Username, loginRequest.Password, ip) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusUnauthorized, errors.Wrap(err)) + return + } + + resp.WriteAsJson(models.Token{Token: token}) +} + +// k8s token review +func TokenReviewHandler(req *restful.Request, resp *restful.Response) { + var tokenReview TokenReview + + err := req.ReadEntity(&tokenReview) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + if tokenReview.Spec == nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("token must not be null"))) + return + } + + uToken := tokenReview.Spec.Token + + token, err := 
jwtutils.ValidateToken(uToken) + + if err != nil { + failed := TokenReview{APIVersion: APIVersion, + Kind: KindTokenReview, + Status: &Status{ + Authenticated: false, + }, + } + resp.WriteAsJson(failed) + return + } + + claims := token.Claims.(jwt.MapClaims) + + username := claims["username"].(string) + + conn, err := iam.NewConnection() + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + defer conn.Close() + + user, err := iam.UserDetail(username, conn) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + success := TokenReview{APIVersion: APIVersion, + Kind: KindTokenReview, + Status: &Status{ + Authenticated: true, + User: map[string]interface{}{"username": user.Username, "uid": user.Username, "groups": user.Groups}, + }, + } + + resp.WriteAsJson(success) + return +} diff --git a/pkg/apiserver/iam/groups.go b/pkg/apiserver/iam/groups.go new file mode 100644 index 000000000..65ad3c0f9 --- /dev/null +++ b/pkg/apiserver/iam/groups.go @@ -0,0 +1,253 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package iam + +import ( + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/emicklei/go-restful" + "github.com/go-ldap/ldap" + + "kubesphere.io/kubesphere/pkg/errors" + "kubesphere.io/kubesphere/pkg/models" + "kubesphere.io/kubesphere/pkg/models/iam" +) + +func CreateGroup(req *restful.Request, resp *restful.Response) { + //var json map[string]interface{} + + var group models.Group + + err := req.ReadEntity(&group) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + if !regexp.MustCompile("[a-z0-9]([-a-z0-9]*[a-z0-9])?").MatchString(group.Name) { + resp.WriteHeaderAndEntity(http.StatusBadRequest, fmt.Errorf("incalid group name %s", group)) + return + } + + if group.Creator == "" { + resp.WriteHeaderAndEntity(http.StatusBadRequest, fmt.Errorf("creator should not be null")) + return + } + + created, err := iam.CreateGroup(group) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(created) +} + +func DeleteGroup(req *restful.Request, resp *restful.Response) { + path := req.PathParameter("path") + + if path == "" { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("group path must not be null"))) + return + } + + err := iam.DeleteGroup(path) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) + +} + +func UpdateGroup(req *restful.Request, resp *restful.Response) { + groupPathInPath := req.PathParameter("path") + + var group models.Group + + req.ReadEntity(&group) + + if groupPathInPath != group.Path { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("the path of group (%s) does not match the path on the URL (%s)", group.Path, groupPathInPath))) + return + } + + edited, err := iam.UpdateGroup(&group) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, 
errors.Wrap(err)) + return + } + + resp.WriteAsJson(edited) + +} + +func GroupDetail(req *restful.Request, resp *restful.Response) { + + path := req.PathParameter("path") + + conn, err := iam.NewConnection() + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + defer conn.Close() + + group, err := iam.GroupDetail(path, conn) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(group) + +} + +func GroupUsers(req *restful.Request, resp *restful.Response) { + + path := req.PathParameter("path") + + conn, err := iam.NewConnection() + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + defer conn.Close() + + group, err := iam.GroupDetail(path, conn) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + users := make([]*models.User, 0) + + modify := false + + for i := 0; i < len(group.Members); i++ { + name := group.Members[i] + user, err := iam.UserDetail(name, conn) + + if err != nil { + if ldap.IsErrorWithCode(err, 32) { + group.Members = append(group.Members[:i], group.Members[i+1:]...) 
+ i-- + modify = true + continue + } else { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + } + + clusterRoles, err := iam.GetClusterRoles(name) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + for i := 0; i < len(clusterRoles); i++ { + if clusterRoles[i].Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { + user.ClusterRole = clusterRoles[i].Name + break + } + } + + if group.Path == group.Name { + workspaceRole := iam.GetWorkspaceRole(clusterRoles, group.Name) + user.WorkspaceRole = workspaceRole + } + + users = append(users, user) + } + + if modify { + go iam.UpdateGroup(group) + } + + resp.WriteAsJson(users) + +} + +func CountHandler(req *restful.Request, resp *restful.Response) { + count, err := iam.CountChild("") + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(map[string]int{"total_count": count}) +} + +func RootGroupList(req *restful.Request, resp *restful.Response) { + + array := req.QueryParameter("path") + + if array == "" { + groups, err := iam.ChildList("") + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(groups) + } else { + paths := strings.Split(array, ",") + + groups := make([]*models.Group, 0) + + conn, err := iam.NewConnection() + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + defer conn.Close() + + for _, v := range paths { + path := strings.TrimSpace(v) + group, err := iam.GroupDetail(path, conn) + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + groups = append(groups, group) + } + + resp.WriteAsJson(groups) + } + +} diff --git a/pkg/apiserver/iam/users.go b/pkg/apiserver/iam/users.go new file mode 100644 index 000000000..e35f5ac77 --- 
/dev/null +++ b/pkg/apiserver/iam/users.go @@ -0,0 +1,365 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package iam + +import ( + "fmt" + "net/http" + "regexp" + "strconv" + "strings" + + "github.com/emicklei/go-restful" + "github.com/go-ldap/ldap" + + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/errors" + "kubesphere.io/kubesphere/pkg/models" + "kubesphere.io/kubesphere/pkg/models/iam" +) + +const ( + emailRegex = "^[a-z0-9]+([._\\-]*[a-z0-9])*@([a-z0-9]+[-a-z0-9]*[a-z0-9]+.){1,63}[a-z0-9]+$" +) + +func CreateUser(req *restful.Request, resp *restful.Response) { + var user models.User + + err := req.ReadEntity(&user) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + if user.Username == "" { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid username"))) + return + } + + if !regexp.MustCompile(emailRegex).MatchString(user.Email) { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid email"))) + return + } + + if len(user.Password) < 6 { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid password"))) + return + } + + err = iam.CreateUser(user) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} + +func DeleteUser(req *restful.Request, resp 
*restful.Response) { + username := req.PathParameter("name") + + operator := req.HeaderParameter(constants.UserNameHeader) + + if operator == username { + resp.WriteHeaderAndEntity(http.StatusForbidden, errors.Wrap(fmt.Errorf("cannot delete yourself"))) + return + } + + err := iam.DeleteUser(username) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} + +func UpdateUser(req *restful.Request, resp *restful.Response) { + + usernameInPath := req.PathParameter("name") + username := req.HeaderParameter(constants.UserNameHeader) + var user models.User + + err := req.ReadEntity(&user) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + if usernameInPath != user.Username { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("the name of user (%s) does not match the name on the URL (%s)", user.Username, usernameInPath))) + return + } + + if !regexp.MustCompile(emailRegex).MatchString(user.Email) { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid email"))) + return + } + + if user.Password != "" && len(user.Password) < 6 { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid password"))) + return + } + + if username == user.Username && user.Password != "" { + _, err = iam.Login(username, user.CurrentPassword, "") + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("incorrect current password"))) + return + } + } + + err = iam.UpdateUser(user) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} + +func UserLoginLog(req *restful.Request, resp *restful.Response) { + username := req.PathParameter("name") + logs, err := iam.LoginLog(username) + + if err != nil { + 
resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + result := make([]map[string]string, 0) + + for _, v := range logs { + item := strings.Split(v, ",") + time := item[0] + var ip string + if len(item) > 1 { + ip = item[1] + } + result = append(result, map[string]string{"login_time": time, "login_ip": ip}) + } + + resp.WriteAsJson(result) +} + +func CurrentUserDetail(req *restful.Request, resp *restful.Response) { + + username := req.HeaderParameter(constants.UserNameHeader) + + conn, err := iam.NewConnection() + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + defer conn.Close() + + user, err := iam.UserDetail(username, conn) + + if err != nil { + if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { + resp.WriteHeaderAndEntity(http.StatusForbidden, errors.Wrap(err)) + } else { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + } + return + } + + clusterRoles, err := iam.GetClusterRoles(username) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + clusterRules, err := iam.GetClusterRoleSimpleRules(clusterRoles) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + for i := 0; i < len(clusterRoles); i++ { + if clusterRoles[i].Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { + user.ClusterRole = clusterRoles[i].Name + break + } + } + + user.ClusterRules = clusterRules + + resp.WriteAsJson(user) +} + +func NamespacesListHandler(req *restful.Request, resp *restful.Response) { + username := req.PathParameter("name") + + namespaces, err := iam.GetNamespaces(username) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(namespaces) +} + +func UserDetail(req *restful.Request, resp *restful.Response) { + username := 
req.PathParameter("name") + + conn, err := iam.NewConnection() + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + defer conn.Close() + + user, err := iam.UserDetail(username, conn) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + clusterRoles, err := iam.GetClusterRoles(username) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + clusterRules, err := iam.GetClusterRoleSimpleRules(clusterRoles) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + workspaceRoles := iam.GetWorkspaceRoles(clusterRoles) + + for i := 0; i < len(clusterRoles); i++ { + if clusterRoles[i].Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { + user.ClusterRole = clusterRoles[i].Name + break + } + } + + user.ClusterRules = clusterRules + + user.WorkspaceRoles = workspaceRoles + + resp.WriteAsJson(user) +} + +func UserList(req *restful.Request, resp *restful.Response) { + + limit, err := strconv.Atoi(req.QueryParameter("limit")) + if err != nil { + limit = 65535 + } + offset, err := strconv.Atoi(req.QueryParameter("offset")) + if err != nil { + offset = 0 + } + + if check := req.QueryParameter("check"); check != "" { + exist, err := iam.UserCreateCheck(check) + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(map[string]bool{"exist": exist}) + return + } + + conn, err := iam.NewConnection() + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + defer conn.Close() + + if query := req.QueryParameter("name"); query != "" { + names := strings.Split(query, ",") + users := make([]*models.User, 0) + for _, name := range names { + user, err := iam.UserDetail(name, conn) + if err != nil { + if 
ldap.IsErrorWithCode(err, 32) { + continue + } else { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + } + users = append(users, user) + } + + resp.WriteAsJson(users) + return + } + + var total int + var users []models.User + + if query := req.QueryParameter("search"); query != "" { + total, users, err = iam.Search(query, limit, offset) + } else if query := req.QueryParameter("keyword"); query != "" { + total, users, err = iam.Search(query, limit, offset) + } else { + total, users, err = iam.UserList(limit, offset) + } + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + for i := 0; i < len(users); i++ { + clusterRoles, err := iam.GetClusterRoles(users[i].Username) + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + for j := 0; j < len(clusterRoles); j++ { + if clusterRoles[j].Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { + users[i].ClusterRole = clusterRoles[j].Name + break + } + } + } + + items := make([]interface{}, 0) + + for _, u := range users { + items = append(items, u) + } + + resp.WriteAsJson(models.PageableResponse{Items: items, TotalCount: total}) +} diff --git a/pkg/apiserver/iam/workspaces.go b/pkg/apiserver/iam/workspaces.go new file mode 100644 index 000000000..64d70836c --- /dev/null +++ b/pkg/apiserver/iam/workspaces.go @@ -0,0 +1,746 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + +*/ +package iam + +import ( + "fmt" + "net/http" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/emicklei/go-restful" + "github.com/go-ldap/ldap" + "k8s.io/api/core/v1" + rbac "k8s.io/api/rbac/v1" + apierror "k8s.io/apimachinery/pkg/api/errors" + + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/errors" + "kubesphere.io/kubesphere/pkg/models" + "kubesphere.io/kubesphere/pkg/models/iam" + "kubesphere.io/kubesphere/pkg/models/metrics" + "kubesphere.io/kubesphere/pkg/models/workspaces" + sliceutils "kubesphere.io/kubesphere/pkg/utils" +) + +const UserNameHeader = "X-Token-Username" + +func WorkspaceRoles(req *restful.Request, resp *restful.Response) { + + name := req.PathParameter("name") + + workspace, err := workspaces.Detail(name) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + roles, err := workspaces.Roles(workspace) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(roles) +} + +func WorkspaceMemberQuery(req *restful.Request, resp *restful.Response) { + workspace := req.PathParameter("name") + keyword := req.QueryParameter("keyword") + + users, err := workspaces.GetWorkspaceMembers(workspace, keyword) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(users) +} + +func WorkspaceMemberDetail(req *restful.Request, resp *restful.Response) { + workspace := req.PathParameter("name") + username := req.PathParameter("member") + + user, err := iam.GetUser(username) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + namespaces, err := workspaces.Namespaces(workspace) + + if err != nil { + 
resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + user.WorkspaceRole = user.WorkspaceRoles[workspace] + + roles := make(map[string]string) + + for _, namespace := range namespaces { + if role := user.Roles[namespace.Name]; role != "" { + roles[namespace.Name] = role + } + } + + user.Roles = roles + user.Rules = nil + user.WorkspaceRules = nil + user.WorkspaceRoles = nil + user.ClusterRules = nil + resp.WriteAsJson(user) +} + +func WorkspaceMemberInvite(req *restful.Request, resp *restful.Response) { + var users []models.UserInvite + workspace := req.PathParameter("name") + err := req.ReadEntity(&users) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + err = workspaces.Invite(workspace, users) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} + +func WorkspaceMemberRemove(req *restful.Request, resp *restful.Response) { + query := req.QueryParameter("name") + workspace := req.PathParameter("name") + + names := strings.Split(query, ",") + + err := workspaces.RemoveMembers(workspace, names) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} + +func NamespaceCheckHandler(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + + exist, err := workspaces.NamespaceExistCheck(namespace) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(map[string]bool{"exist": exist}) +} + +func NamespaceDeleteHandler(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + workspace := req.PathParameter("name") + + err := workspaces.DeleteNamespace(workspace, namespace) + + if err != nil { + 
resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} + +func DevOpsProjectDeleteHandler(req *restful.Request, resp *restful.Response) { + devops := req.PathParameter("id") + workspace := req.PathParameter("name") + force := req.QueryParameter("force") + username := req.HeaderParameter(UserNameHeader) + + err := workspaces.UnBindDevopsProject(workspace, devops) + + if err != nil && force != "true" { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + err = workspaces.DeleteDevopsProject(username, devops) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} + +func DevOpsProjectCreateHandler(req *restful.Request, resp *restful.Response) { + + workspace := req.PathParameter("name") + username := req.HeaderParameter(UserNameHeader) + + var devops models.DevopsProject + + err := req.ReadEntity(&devops) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + project, err := workspaces.CreateDevopsProject(username, workspace, devops) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(project) + +} + +func NamespaceCreateHandler(req *restful.Request, resp *restful.Response) { + workspace := req.PathParameter("name") + username := req.HeaderParameter(UserNameHeader) + + namespace := &v1.Namespace{} + + err := req.ReadEntity(namespace) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + if namespace.Annotations == nil { + namespace.Annotations = make(map[string]string, 0) + } + + namespace.Annotations["creator"] = username + namespace.Annotations["workspace"] = workspace + + if namespace.Labels == nil { + namespace.Labels = make(map[string]string, 0) + } + + 
namespace.Labels["kubesphere.io/workspace"] = workspace + + namespace, err = workspaces.CreateNamespace(namespace) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid workspace name"))) + return + } + + resp.WriteAsJson(namespace) +} + +func DevOpsProjectHandler(req *restful.Request, resp *restful.Response) { + + workspace := req.PathParameter("name") + username := req.PathParameter("username") + keyword := req.QueryParameter("keyword") + + if username == "" { + username = req.HeaderParameter(UserNameHeader) + } + + limit := 65535 + offset := 0 + orderBy := "createTime" + reverse := true + + if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(req.QueryParameter("paging")); len(groups) == 3 { + limit, _ = strconv.Atoi(groups[1]) + page, _ := strconv.Atoi(groups[2]) + offset = (page - 1) * limit + } + + if groups := regexp.MustCompile(`^(createTime|name)$`).FindStringSubmatch(req.QueryParameter("order")); len(groups) == 2 { + orderBy = groups[1] + reverse = false + } + + if q := req.QueryParameter("reverse"); q != "" { + b, err := strconv.ParseBool(q) + if err == nil { + reverse = b + } + } + + total, devOpsProjects, err := workspaces.ListDevopsProjectsByUser(username, workspace, keyword, orderBy, reverse, limit, offset) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + result := models.PageableResponse{} + result.TotalCount = total + result.Items = make([]interface{}, 0) + for _, n := range devOpsProjects { + result.Items = append(result.Items, n) + } + resp.WriteAsJson(result) +} + +func WorkspaceCreateHandler(req *restful.Request, resp *restful.Response) { + var workspace models.Workspace + username := req.HeaderParameter(UserNameHeader) + err := req.ReadEntity(&workspace) + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + if workspace.Name == "" || 
strings.Contains(workspace.Name, ":") { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid workspace name"))) + return + } + + workspace.Path = workspace.Name + workspace.Members = nil + + if workspace.Admin != "" { + workspace.Creator = workspace.Admin + } else { + workspace.Creator = username + } + + created, err := workspaces.Create(&workspace) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(created) + +} + +func DeleteWorkspaceHandler(req *restful.Request, resp *restful.Response) { + name := req.PathParameter("name") + + if name == "" || strings.Contains(name, ":") { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid workspace name"))) + return + } + + workspace, err := workspaces.Detail(name) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + err = workspaces.Delete(workspace) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} +func WorkspaceEditHandler(req *restful.Request, resp *restful.Response) { + var workspace models.Workspace + name := req.PathParameter("name") + err := req.ReadEntity(&workspace) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + if name != workspace.Name { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("the name of workspace (%s) does not match the name on the URL (%s)", workspace.Name, name))) + return + } + + if workspace.Name == "" || strings.Contains(workspace.Name, ":") { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid workspace name"))) + return + } + + workspace.Path = workspace.Name + + workspace.Members = nil + + edited, err := workspaces.Edit(&workspace) + + if err != nil { + 
resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(edited) +} +func WorkspaceDetailHandler(req *restful.Request, resp *restful.Response) { + + name := req.PathParameter("name") + + workspace, err := workspaces.Detail(name) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(workspace) +} + +// List all workspaces for the current user +func UserWorkspaceListHandler(req *restful.Request, resp *restful.Response) { + keyword := req.QueryParameter("keyword") + username := req.HeaderParameter(constants.UserNameHeader) + + ws, err := workspaces.ListWorkspaceByUser(username, keyword) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + sort.Slice(ws, func(i, j int) bool { + t1, err := ws[i].GetCreateTime() + if err != nil { + return false + } + t2, err := ws[j].GetCreateTime() + if err != nil { + return true + } + return t1.After(t2) + }) + + resp.WriteAsJson(ws) +} + +func UserNamespaceListHandler(req *restful.Request, resp *restful.Response) { + withMetrics, err := strconv.ParseBool(req.QueryParameter("metrics")) + + if err != nil { + withMetrics = false + } + + username := req.PathParameter("username") + keyword := req.QueryParameter("keyword") + if username == "" { + username = req.HeaderParameter(UserNameHeader) + } + limit := 65535 + offset := 0 + orderBy := "createTime" + reverse := true + + if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(req.QueryParameter("paging")); len(groups) == 3 { + limit, _ = strconv.Atoi(groups[1]) + page, _ := strconv.Atoi(groups[2]) + if page < 0 { + page = 1 + } + offset = (page - 1) * limit + } + + if groups := regexp.MustCompile(`^(createTime|name)$`).FindStringSubmatch(req.QueryParameter("order")); len(groups) == 2 { + orderBy = groups[1] + reverse = false + } + + if q := req.QueryParameter("reverse"); q != 
"" { + b, err := strconv.ParseBool(q) + if err == nil { + reverse = b + } + } + + workspaceName := req.PathParameter("workspace") + + total, namespaces, err := workspaces.ListNamespaceByUser(workspaceName, username, keyword, orderBy, reverse, limit, offset) + + if withMetrics { + namespaces = metrics.GetNamespacesWithMetrics(namespaces) + } + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + result := models.PageableResponse{} + result.TotalCount = total + result.Items = make([]interface{}, 0) + for _, n := range namespaces { + result.Items = append(result.Items, n) + } + + resp.WriteAsJson(result) +} + +func DevopsRulesHandler(req *restful.Request, resp *restful.Response) { + //workspaceName := req.PathParameter("workspace") + username := req.HeaderParameter(constants.UserNameHeader) + devopsName := req.PathParameter("devops") + + var rules []models.SimpleRule + + role, err := iam.GetDevopsRole(devopsName, username) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + switch role { + case "developer": + rules = []models.SimpleRule{ + {Name: "pipelines", Actions: []string{"view", "trigger"}}, + {Name: "roles", Actions: []string{"view"}}, + {Name: "members", Actions: []string{"view"}}, + {Name: "devops", Actions: []string{"view"}}, + } + break + case "owner": + rules = []models.SimpleRule{ + {Name: "pipelines", Actions: []string{"create", "edit", "view", "delete", "trigger"}}, + {Name: "roles", Actions: []string{"view"}}, + {Name: "members", Actions: []string{"create", "edit", "view", "delete"}}, + {Name: "credentials", Actions: []string{"create", "edit", "view", "delete"}}, + {Name: "devops", Actions: []string{"edit", "view", "delete"}}, + } + break + case "maintainer": + rules = []models.SimpleRule{ + {Name: "pipelines", Actions: []string{"create", "edit", "view", "delete", "trigger"}}, + {Name: "roles", Actions: []string{"view"}}, + {Name: 
"members", Actions: []string{"view"}}, + {Name: "credentials", Actions: []string{"create", "edit", "view", "delete"}}, + {Name: "devops", Actions: []string{"view"}}, + } + break + case "reporter": + fallthrough + default: + rules = []models.SimpleRule{ + {Name: "pipelines", Actions: []string{"view"}}, + {Name: "roles", Actions: []string{"view"}}, + {Name: "members", Actions: []string{"view"}}, + {Name: "devops", Actions: []string{"view"}}, + } + break + } + + resp.WriteAsJson(rules) +} + +func NamespacesRulesHandler(req *restful.Request, resp *restful.Response) { + workspaceName := req.PathParameter("workspace") + username := req.HeaderParameter(constants.UserNameHeader) + namespaceName := req.PathParameter("namespace") + + namespace, err := iam.GetNamespace(namespaceName) + + if err != nil { + if apierror.IsNotFound(err) { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("permission undefined"))) + } else { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + } + return + } + + if namespace.Labels == nil || namespace.Labels["kubesphere.io/workspace"] != workspaceName { + resp.WriteHeaderAndEntity(http.StatusForbidden, errors.Wrap(fmt.Errorf("permission undefined"))) + return + } + + clusterRoles, err := iam.GetClusterRoles(username) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + roles, err := iam.GetRoles(username, namespaceName) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + for _, clusterRole := range clusterRoles { + role := new(rbac.Role) + role.Name = clusterRole.Name + role.Labels = clusterRole.Labels + role.Namespace = namespaceName + role.Annotations = clusterRole.Annotations + role.Kind = "Role" + role.Rules = clusterRole.Rules + roles = append(roles, role) + } + + rules, err := iam.GetRoleSimpleRules(roles, namespaceName) + + if err != nil { + 
resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + if rules[namespaceName] == nil { + resp.WriteAsJson(make([]models.SimpleRule, 0)) + } else { + resp.WriteAsJson(rules[namespaceName]) + } +} + +func WorkspaceRulesHandler(req *restful.Request, resp *restful.Response) { + workspace := req.PathParameter("workspace") + + username := req.HeaderParameter(constants.UserNameHeader) + + clusterRoles, err := iam.GetClusterRoles(username) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + rules := iam.GetWorkspaceSimpleRules(clusterRoles, workspace) + + if rules[workspace] != nil { + resp.WriteAsJson(rules[workspace]) + } else if rules["*"] != nil { + resp.WriteAsJson(rules["*"]) + } else { + resp.WriteAsJson(make([]models.SimpleRule, 0)) + } +} + +func WorkspaceMemberList(req *restful.Request, resp *restful.Response) { + workspace := req.PathParameter("workspace") + limit, err := strconv.Atoi(req.QueryParameter("limit")) + if err != nil { + limit = 500 + } + offset, err := strconv.Atoi(req.QueryParameter("offset")) + if err != nil { + offset = 0 + } + + conn, err := iam.NewConnection() + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + defer conn.Close() + + group, err := iam.GroupDetail(workspace, conn) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + keyword := "" + + if query := req.QueryParameter("keyword"); query != "" { + keyword = query + } + + users := make([]*models.User, 0) + + total := len(group.Members) + + members := sliceutils.RemoveString(group.Members, func(item string) bool { + return keyword != "" && !strings.Contains(item, keyword) + }) + + for i := 0; i < len(members); i++ { + username := members[i] + + if i < 
offset { + continue + } + + if len(users) == limit { + break + } + + user, err := iam.UserDetail(username, conn) + + if err != nil { + if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { + group.Members = sliceutils.RemoveString(group.Members, func(item string) bool { + return item == username + }) + continue + } else { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + } + + clusterRoles, err := iam.GetClusterRoles(username) + + for i := 0; i < len(clusterRoles); i++ { + if clusterRoles[i].Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { + user.ClusterRole = clusterRoles[i].Name + break + } + } + + if group.Path == group.Name { + + workspaceRole := iam.GetWorkspaceRole(clusterRoles, group.Name) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + user.WorkspaceRole = workspaceRole + } + + users = append(users, user) + } + + if total > len(group.Members) { + go iam.UpdateGroup(group) + } + if req.QueryParameter("limit") != "" { + resp.WriteAsJson(map[string]interface{}{"items": users, "total_count": len(members)}) + } else { + resp.WriteAsJson(users) + } +} diff --git a/pkg/apiserver/metrics/metrics.go b/pkg/apiserver/metrics/metrics.go index ebac4d7db..0a7403a96 100644 --- a/pkg/apiserver/metrics/metrics.go +++ b/pkg/apiserver/metrics/metrics.go @@ -21,9 +21,10 @@ import ( "github.com/emicklei/go-restful" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/storage" + "net/http" ) -type scMetricsItem struct { +type ScMetricsItem struct { Name string `json:"name"` Metrics *storage.ScMetrics `json:"metrics"` } @@ -35,11 +36,12 @@ func GetScMetrics(request *restful.Request, response *restful.Response) { metrics, err := storage.GetScMetrics(scName) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - result := scMetricsItem{ + 
result := ScMetricsItem{ Name: scName, Metrics: metrics, } @@ -51,21 +53,23 @@ func GetScMetrics(request *restful.Request, response *restful.Response) { func GetScMetricsList(request *restful.Request, response *restful.Response) { scList, err := storage.GetScList() - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } // Set return value - items := make([]scMetricsItem, 0) + items := make([]ScMetricsItem, 0) for _, v := range scList { metrics, err := storage.GetScMetrics(v.GetName()) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - item := scMetricsItem{ + item := ScMetricsItem{ Name: v.GetName(), Metrics: metrics, } diff --git a/pkg/apiserver/monitoring/monitoring.go b/pkg/apiserver/monitoring/monitoring.go new file mode 100644 index 000000000..76beaeabb --- /dev/null +++ b/pkg/apiserver/monitoring/monitoring.go @@ -0,0 +1,220 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package monitoring + +import ( + "github.com/emicklei/go-restful" + "kubesphere.io/kubesphere/pkg/client" + "kubesphere.io/kubesphere/pkg/models/metrics" +) + +func MonitorPod(request *restful.Request, response *restful.Response) { + requestParams := client.ParseMonitoringRequestParams(request) + podName := requestParams.PodName + metricName := requestParams.MetricsName + if podName != "" { + // single pod single metric + queryType, params, nullRule := metrics.AssemblePodMetricRequestInfo(requestParams, metricName) + var res *metrics.FormatedMetric + if !nullRule { + res = metrics.GetMetric(queryType, params, metricName) + } + response.WriteAsJson(res) + + } else { + // multiple + rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelPod) + // sorting + sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelPodName) + // paging + pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) + + response.WriteAsJson(pagedMetrics) + } +} + +func MonitorContainer(request *restful.Request, response *restful.Response) { + requestParams := client.ParseMonitoringRequestParams(request) + metricName := requestParams.MetricsName + if requestParams.MetricsFilter != "" { + rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelContainer) + // sorting + sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelContainerName) + // paging + pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) + + response.WriteAsJson(pagedMetrics) + + } else { + res := metrics.MonitorContainer(requestParams, metricName) + response.WriteAsJson(res) + } + +} + +func MonitorWorkload(request *restful.Request, response *restful.Response) { + requestParams := client.ParseMonitoringRequestParams(request) + + 
rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelWorkload) + + var sortedMetrics *metrics.FormatedLevelMetric + var maxMetricCount int + + wlKind := requestParams.WorkloadKind + + // sorting + if wlKind == "" { + + sortedMetrics, maxMetricCount = metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelWorkload) + } else { + + sortedMetrics, maxMetricCount = metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelPodName) + } + + // paging + pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) + + response.WriteAsJson(pagedMetrics) + +} + +func MonitorAllWorkspaces(request *restful.Request, response *restful.Response) { + + requestParams := client.ParseMonitoringRequestParams(request) + + tp := requestParams.Tp + if tp == "_statistics" { + // merge multiple metric: all-devops, all-roles, all-projects...this api is designed for admin + res := metrics.MonitorAllWorkspacesStatistics() + + response.WriteAsJson(res) + + } else if tp == "rank" { + rawMetrics := metrics.MonitorAllWorkspaces(requestParams) + // sorting + sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelWorkspace) + // paging + pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) + + response.WriteAsJson(pagedMetrics) + } else { + res := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelWorkspace) + response.WriteAsJson(res) + } +} + +func MonitorOneWorkspace(request *restful.Request, response *restful.Response) { + requestParams := client.ParseMonitoringRequestParams(request) + + tp := requestParams.Tp + if tp == "rank" { + // multiple + rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelWorkspace) + // sorting + sortedMetrics, maxMetricCount := 
metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelNamespace) + // paging + pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) + + response.WriteAsJson(pagedMetrics) + + } else if tp == "_statistics" { + wsName := requestParams.WsName + + // merge multiple metric: devops, roles, projects... + res := metrics.MonitorOneWorkspaceStatistics(wsName) + response.WriteAsJson(res) + } else { + res := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelWorkspace) + response.WriteAsJson(res) + } +} + +func MonitorNamespace(request *restful.Request, response *restful.Response) { + requestParams := client.ParseMonitoringRequestParams(request) + metricName := requestParams.MetricsName + nsName := requestParams.NsName + if nsName != "" { + // single + queryType, params := metrics.AssembleNamespaceMetricRequestInfo(requestParams, metricName) + res := metrics.GetMetric(queryType, params, metricName) + response.WriteAsJson(res) + } else { + // multiple + rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelNamespace) + // sorting + sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelNamespace) + // paging + pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) + + response.WriteAsJson(pagedMetrics) + } +} + +func MonitorCluster(request *restful.Request, response *restful.Response) { + requestParams := client.ParseMonitoringRequestParams(request) + + metricName := requestParams.MetricsName + if metricName != "" { + // single + queryType, params := metrics.AssembleClusterMetricRequestInfo(requestParams, metricName) + res := metrics.GetMetric(queryType, params, metricName) + + response.WriteAsJson(res) + } else { + // multiple + res := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelCluster) + 
response.WriteAsJson(res) + } +} + +func MonitorNode(request *restful.Request, response *restful.Response) { + requestParams := client.ParseMonitoringRequestParams(request) + + metricName := requestParams.MetricsName + if metricName != "" { + // single + queryType, params := metrics.AssembleNodeMetricRequestInfo(requestParams, metricName) + res := metrics.GetMetric(queryType, params, metricName) + nodeAddress := metrics.GetNodeAddressInfo() + metrics.AddNodeAddressMetric(res, nodeAddress) + response.WriteAsJson(res) + } else { + // multiple + rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelNode) + nodeAddress := metrics.GetNodeAddressInfo() + + for i := 0; i < len(rawMetrics.Results); i++ { + metrics.AddNodeAddressMetric(&rawMetrics.Results[i], nodeAddress) + } + + // sorting + sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelNode) + // paging + pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) + + response.WriteAsJson(pagedMetrics) + } +} + +// k8s component(controller, scheduler, etcd) status +func MonitorComponentStatus(request *restful.Request, response *restful.Response) { + requestParams := client.ParseMonitoringRequestParams(request) + + status := metrics.MonitorComponentStatus(requestParams) + response.WriteAsJson(status) +} diff --git a/pkg/apiserver/operations/job.go b/pkg/apiserver/operations/job.go index 9f44304b0..e87411030 100644 --- a/pkg/apiserver/operations/job.go +++ b/pkg/apiserver/operations/job.go @@ -19,10 +19,9 @@ package operations import ( - "net/http" - "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/workloads" + "net/http" "github.com/emicklei/go-restful" @@ -40,10 +39,11 @@ func RerunJob(req *restful.Request, resp *restful.Response) { case "rerun": err = workloads.JobReRun(namespace, job) default: - resp.WriteHeaderAndEntity(http.StatusBadRequest, 
errors.New(errors.InvalidArgument, fmt.Sprintf("invalid operation %s", action))) + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid operation %s", action))) return } - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } diff --git a/pkg/apiserver/operations/node.go b/pkg/apiserver/operations/node.go index 689ec3ddf..a209ef86f 100644 --- a/pkg/apiserver/operations/node.go +++ b/pkg/apiserver/operations/node.go @@ -20,6 +20,7 @@ package operations import ( "github.com/emicklei/go-restful" + "net/http" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/nodes" @@ -31,7 +32,8 @@ func DrainNode(request *restful.Request, response *restful.Response) { err := nodes.DrainNode(nodeName) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } diff --git a/pkg/apiserver/quotas/quotas.go b/pkg/apiserver/quotas/quotas.go index a8d08b418..d6819d6b1 100644 --- a/pkg/apiserver/quotas/quotas.go +++ b/pkg/apiserver/quotas/quotas.go @@ -20,48 +20,30 @@ package quotas import ( "github.com/emicklei/go-restful" - "github.com/emicklei/go-restful-openapi" + "net/http" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/quotas" ) -func V1Alpha2(ws *restful.WebService) { - - tags := []string{"quotas"} - - ws.Route(ws.GET("/quotas"). - To(getClusterQuotas). - Doc("get whole cluster's resource usage"). - Writes(quotas.ResourceQuota{}). - Metadata(restfulspec.KeyOpenAPITags, tags)) - - ws.Route(ws.GET("/namespaces/{namespace}/quotas"). - Doc("get specified namespace's resource quota and usage"). - Param(ws.PathParameter("namespace", "namespace's name"). - DataType("string")). - Writes(quotas.ResourceQuota{}). - Metadata(restfulspec.KeyOpenAPITags, tags). 
- To(getNamespaceQuotas)) - -} - -func getNamespaceQuotas(req *restful.Request, resp *restful.Response) { +func GetNamespaceQuotas(req *restful.Request, resp *restful.Response) { namespace := req.PathParameter("namespace") quota, err := quotas.GetNamespaceQuotas(namespace) - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } resp.WriteAsJson(quota) } -func getClusterQuotas(req *restful.Request, resp *restful.Response) { +func GetClusterQuotas(req *restful.Request, resp *restful.Response) { quota, err := quotas.GetClusterQuotas() - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } diff --git a/pkg/apiserver/registries/registries.go b/pkg/apiserver/registries/registries.go index f732d839f..d22bb90b0 100644 --- a/pkg/apiserver/registries/registries.go +++ b/pkg/apiserver/registries/registries.go @@ -20,55 +20,29 @@ package registries import ( "github.com/emicklei/go-restful" + "net/http" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/registries" ) -func V1Alpha2(ws *restful.WebService) { - - ws.Route(ws.POST("registries/verify").To(registryVerify)) - -} - -func registryVerify(request *restful.Request, response *restful.Response) { +func RegistryVerify(request *restful.Request, response *restful.Response) { authInfo := registries.AuthInfo{} err := request.ReadEntity(&authInfo) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } err = registries.RegistryVerify(authInfo) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } response.WriteAsJson(errors.None) } - -//func (c *registriesHandler) handlerImageSearch(request *restful.Request, response *restful.Response) { -// -// 
registry := request.PathParameter("name") -// searchWord := request.PathParameter("searchWord") -// namespace := request.PathParameter("namespace") -// -// res := c.registries.ImageSearch(namespace, registry, searchWord) -// -// response.WriteAsJson(res) -// -//} -// -//func (c *registriesHandler) handlerGetImageTags(request *restful.Request, response *restful.Response) { -// -// registry := request.PathParameter("name") -// image := request.QueryParameter("image") -// namespace := request.PathParameter("namespace") -// -// res := c.registries.GetImageTags(namespace, registry, image) -// -// response.WriteAsJson(res) -//} diff --git a/pkg/apiserver/resources/cluster_resources.go b/pkg/apiserver/resources/cluster_resources.go index 1c662cf29..4b94d45a6 100644 --- a/pkg/apiserver/resources/cluster_resources.go +++ b/pkg/apiserver/resources/cluster_resources.go @@ -19,6 +19,7 @@ package resources import ( "github.com/emicklei/go-restful" + "net/http" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/resources" @@ -27,14 +28,15 @@ import ( func ClusterResourceHandler(req *restful.Request, resp *restful.Response) { resourceName := req.PathParameter("resources") - conditions := req.QueryParameter(params.Conditions) - orderBy := req.QueryParameter(params.OrderBy) - limit, offset := params.ParsePaging(req.QueryParameter(params.Paging)) - reverse := params.ParseReserve(req.QueryParameter(params.Reserve)) + conditions, err := params.ParseConditions(req) + orderBy := req.QueryParameter(params.OrderByParam) + limit, offset := params.ParsePaging(req) + reverse := params.ParseReverse(req) result, err := resources.ListClusterResource(resourceName, conditions, orderBy, reverse, limit, offset) - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } diff --git a/pkg/apiserver/resources/namespace_resources.go b/pkg/apiserver/resources/namespace_resources.go index 
cf1463312..84f2bc006 100644 --- a/pkg/apiserver/resources/namespace_resources.go +++ b/pkg/apiserver/resources/namespace_resources.go @@ -19,6 +19,7 @@ package resources import ( "github.com/emicklei/go-restful" + "net/http" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/resources" @@ -28,14 +29,15 @@ import ( func NamespaceResourceHandler(req *restful.Request, resp *restful.Response) { namespace := req.PathParameter("namespace") resourceName := req.PathParameter("resources") - conditions := req.QueryParameter(params.Conditions) - orderBy := req.QueryParameter(params.OrderBy) - limit, offset := params.ParsePaging(req.QueryParameter(params.Paging)) - reverse := params.ParseReserve(req.QueryParameter(params.Reserve)) + conditions, err := params.ParseConditions(req) + orderBy := req.QueryParameter(params.OrderByParam) + limit, offset := params.ParsePaging(req) + reverse := params.ParseReverse(req) result, err := resources.ListNamespaceResource(namespace, resourceName, conditions, orderBy, reverse, limit, offset) - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } diff --git a/pkg/apiserver/resources/storage.go b/pkg/apiserver/resources/storage.go index 4da8196f2..953dede0a 100644 --- a/pkg/apiserver/resources/storage.go +++ b/pkg/apiserver/resources/storage.go @@ -20,6 +20,7 @@ package resources import ( "github.com/emicklei/go-restful" "k8s.io/api/core/v1" + "net/http" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/storage" @@ -43,7 +44,8 @@ func GetPodListByPvc(request *restful.Request, response *restful.Response) { pvcName := request.PathParameter("pvc") nsName := request.PathParameter("namespace") pods, err := storage.GetPodListByPvc(pvcName, nsName) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } result := 
podListByPvc{Name: pvcName, Namespace: nsName, Pods: pods} @@ -56,7 +58,8 @@ func GetPvcListBySc(request *restful.Request, response *restful.Response) { scName := request.PathParameter("storageclass") claims, err := storage.GetPvcListBySc(scName) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } diff --git a/pkg/apiserver/resources/user.go b/pkg/apiserver/resources/user.go index 8e948ca11..fc293e2d3 100644 --- a/pkg/apiserver/resources/user.go +++ b/pkg/apiserver/resources/user.go @@ -19,6 +19,7 @@ package resources import ( "github.com/emicklei/go-restful" + "net/http" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/kubeconfig" @@ -31,7 +32,8 @@ func GetKubectl(req *restful.Request, resp *restful.Response) { kubectlPod, err := kubectl.GetKubectlPod(user) - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } @@ -44,7 +46,8 @@ func GetKubeconfig(req *restful.Request, resp *restful.Response) { kubectlConfig, err := kubeconfig.GetKubeConfig(user) - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } diff --git a/pkg/apiserver/revisions/revisions.go b/pkg/apiserver/revisions/revisions.go index 355e5aed9..722d112dd 100644 --- a/pkg/apiserver/revisions/revisions.go +++ b/pkg/apiserver/revisions/revisions.go @@ -22,93 +22,59 @@ import ( "strconv" "github.com/emicklei/go-restful" - "github.com/emicklei/go-restful-openapi" - "k8s.io/api/apps/v1" - "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/revisions" ) -func V1Alpha2(ws *restful.WebService) { - ws.Route(ws.GET("/namespaces/{namespace}/daemonsets/{daemonset}/revisions/{revision}"). - To(getDaemonSetRevision). - Metadata(restfulspec.KeyOpenAPITags, []string{"daemonsets", "revision"}). 
- Doc("Handle daemonset operation"). - Param(ws.PathParameter("daemonset", "daemonset's name"). - DataType("string")). - Param(ws.PathParameter("namespace", "daemonset's namespace"). - DataType("string")). - Param(ws.PathParameter("revision", "daemonset's revision")). - Writes(v1.DaemonSet{})) - ws.Route(ws.GET("/namespaces/{namespace}/deployments/{deployment}/revisions/{revision}"). - To(getDeployRevision). - Metadata(restfulspec.KeyOpenAPITags, []string{"deployments", "revision"}). - Doc("Handle deployment operation"). - Param(ws.PathParameter("deployment", "deployment's name"). - DataType("string")). - Param(ws.PathParameter("namespace", - "deployment's namespace"). - DataType("string")). - Param(ws.PathParameter("deployment", "deployment's name")). - Writes(v1.ReplicaSet{})) - ws.Route(ws.GET("/namespaces/{namespace}/statefulsets/{statefulset}/revisions/{revision}"). - To(getStatefulSetRevision). - Metadata(restfulspec.KeyOpenAPITags, []string{"statefulsets", "revisions"}). - Doc("Handle statefulset operation"). - Param(ws.PathParameter("statefulset", "statefulset's name"). - DataType("string")). - Param(ws.PathParameter("namespace", "statefulset's namespace"). - DataType("string")). - Param(ws.PathParameter("revision", "statefulset's revision")). 
- Writes(v1.StatefulSet{})) -} - -func getDaemonSetRevision(req *restful.Request, resp *restful.Response) { +func GetDaemonSetRevision(req *restful.Request, resp *restful.Response) { daemonset := req.PathParameter("daemonset") namespace := req.PathParameter("namespace") revision, err := strconv.Atoi(req.PathParameter("revision")) if err != nil { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(errors.InvalidArgument, err.Error())) + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) return } result, err := revisions.GetDaemonSetRevision(namespace, daemonset, revision) - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } resp.WriteAsJson(result) } -func getDeployRevision(req *restful.Request, resp *restful.Response) { +func GetDeployRevision(req *restful.Request, resp *restful.Response) { deploy := req.PathParameter("deployment") namespace := req.PathParameter("namespace") revision := req.PathParameter("revision") result, err := revisions.GetDeployRevision(namespace, deploy, revision) - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } resp.WriteAsJson(result) } -func getStatefulSetRevision(req *restful.Request, resp *restful.Response) { +func GetStatefulSetRevision(req *restful.Request, resp *restful.Response) { statefulset := req.PathParameter("statefulset") namespace := req.PathParameter("namespace") revision, err := strconv.Atoi(req.PathParameter("revision")) if err != nil { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(errors.InvalidArgument, err.Error())) + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) return } result, err := revisions.GetStatefulSetRevision(namespace, statefulset, revision) - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, 
errors.Wrap(err)) return } diff --git a/pkg/apiserver/routers/routers.go b/pkg/apiserver/routers/routers.go index ad6ea7d0f..deda6549a 100644 --- a/pkg/apiserver/routers/routers.go +++ b/pkg/apiserver/routers/routers.go @@ -19,58 +19,31 @@ package routers import ( + "fmt" "github.com/emicklei/go-restful" + "net/http" "kubesphere.io/kubesphere/pkg/errors" - "net/http" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "kubesphere.io/kubesphere/pkg/models/routers" ) -func V1Alpha2(ws *restful.WebService) { - ws.Route(ws.GET("/routers").To(getAllRouters). - Doc("Get all routers")) - - ws.Route(ws.GET("/users/{username}/routers").To(getAllRoutersOfUser). - Doc("Get routers for user")) - - ws.Route(ws.GET("/namespaces/{namespace}/router").To(getRouter). - Doc("Get router of a specified project"). - Param(ws.PathParameter("namespace", "name of the project"). - DataType("string"))) - - ws.Route(ws.DELETE("/namespaces/{namespace}/router").To(deleteRouter). - Doc("Get router of a specified project"). - Param(ws.PathParameter("namespace", "name of the project"). - DataType("string"))) - - ws.Route(ws.POST("/namespaces/{namespace}/router").To(createRouter). - Doc("Create a router for a specified project"). - Param(ws.PathParameter("namespace", "name of the project"). - DataType("string"))) - - ws.Route(ws.PUT("/namespaces/{namespace}/router").To(updateRouter). - Doc("Update a router for a specified project"). - Param(ws.PathParameter("namespace", "name of the project"). 
- DataType("string"))) -} - type Router struct { RouterType string `json:"type"` Annotations map[string]string `json:"annotations"` } // Get all namespace ingress controller services -func getAllRouters(request *restful.Request, response *restful.Response) { +func GetAllRouters(request *restful.Request, response *restful.Response) { routers, err := routers.GetAllRouters() - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } @@ -78,13 +51,14 @@ func getAllRouters(request *restful.Request, response *restful.Response) { } // Get all namespace ingress controller services for user -func getAllRoutersOfUser(request *restful.Request, response *restful.Response) { +func GetAllRoutersOfUser(request *restful.Request, response *restful.Response) { username := request.PathParameter("username") routers, err := routers.GetAllRoutersOfUser(username) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } @@ -92,12 +66,13 @@ func getAllRoutersOfUser(request *restful.Request, response *restful.Response) { } // Get ingress controller service for specified namespace -func getRouter(request *restful.Request, response *restful.Response) { +func GetRouter(request *restful.Request, response *restful.Response) { namespace := request.PathParameter("namespace") router, err := routers.GetRouter(namespace) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } @@ -105,7 +80,7 @@ func getRouter(request *restful.Request, response *restful.Response) { } // Create ingress controller and related services -func createRouter(request *restful.Request, response *restful.Response) { +func CreateRouter(request *restful.Request, response *restful.Response) { namespace := request.PathParameter("namespace") @@ -119,18 
+94,17 @@ func createRouter(request *restful.Request, response *restful.Response) { var router *v1.Service - serviceType, annotationMap, err := ParseParameter(newRouter) + serviceType, annotationMap, err := parseParameter(newRouter) if err != nil { - glog.Error("Wrong annotations, missing key or value") - response.WriteHeaderAndEntity(http.StatusBadRequest, - errors.New(errors.InvalidArgument, "Wrong annotations, missing key or value")) + response.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("wrong annotations, missing key or value"))) return } router, err = routers.CreateRouter(namespace, serviceType, annotationMap) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } @@ -138,19 +112,20 @@ func createRouter(request *restful.Request, response *restful.Response) { } // Delete ingress controller and services -func deleteRouter(request *restful.Request, response *restful.Response) { +func DeleteRouter(request *restful.Request, response *restful.Response) { namespace := request.PathParameter("namespace") router, err := routers.DeleteRouter(namespace) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } response.WriteAsJson(router) } -func updateRouter(request *restful.Request, response *restful.Response) { +func UpdateRouter(request *restful.Request, response *restful.Response) { namespace := request.PathParameter("namespace") @@ -158,23 +133,23 @@ func updateRouter(request *restful.Request, response *restful.Response) { err := request.ReadEntity(&newRouter) if err != nil { - glog.Error(err) - response.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(errors.InvalidArgument, err.Error())) + response.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) return } - serviceType, annotationMap, err := ParseParameter(newRouter) + serviceType, 
annotationMap, err := parseParameter(newRouter) router, err := routers.UpdateRouter(namespace, serviceType, annotationMap) - if errors.HandlerError(err, response) { + if err != nil { + response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } response.WriteAsJson(router) } -func ParseParameter(router Router) (routerType v1.ServiceType, annotationMap map[string]string, err error) { +func parseParameter(router Router) (routerType v1.ServiceType, annotationMap map[string]string, err error) { routerType = v1.ServiceTypeNodePort diff --git a/pkg/apiserver/runtime/runtime.go b/pkg/apiserver/runtime/runtime.go index 564aa5d67..a87f12aff 100644 --- a/pkg/apiserver/runtime/runtime.go +++ b/pkg/apiserver/runtime/runtime.go @@ -35,7 +35,9 @@ type ContainerBuilder []func(c *restful.Container) error func NewWebService(gv schema.GroupVersion) *restful.WebService { webservice := restful.WebService{} - webservice.Path(ApiRootPath + "/" + gv.String()) + webservice.Path(ApiRootPath + "/" + gv.String()). + Consumes(restful.MIME_JSON). + Produces(restful.MIME_JSON) return &webservice } diff --git a/pkg/apiserver/workloadstatuses/workloadstatuses.go b/pkg/apiserver/workloadstatuses/workloadstatuses.go index fa04b2c3b..934441283 100644 --- a/pkg/apiserver/workloadstatuses/workloadstatuses.go +++ b/pkg/apiserver/workloadstatuses/workloadstatuses.go @@ -20,39 +20,25 @@ package workloadstatuses import ( "github.com/emicklei/go-restful" - "github.com/emicklei/go-restful-openapi" + "net/http" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/status" ) -func V1Alpha2(ws *restful.WebService) { - tags := []string{"workloadStatus"} - - ws.Route(ws.GET("/workloadstatuses"). - Doc("get abnormal workloads' count of whole cluster"). - Metadata(restfulspec.KeyOpenAPITags, tags). - To(getClusterResourceStatus)) - ws.Route(ws.GET("/namespaces/{namespace}/workloadstatuses"). - Doc("get abnormal workloads' count of specified namespace"). 
- Param(ws.PathParameter("namespace", "the name of namespace"). - DataType("string")). - Metadata(restfulspec.KeyOpenAPITags, tags). - To(getNamespacesResourceStatus)) - -} - -func getClusterResourceStatus(req *restful.Request, resp *restful.Response) { +func GetClusterResourceStatus(req *restful.Request, resp *restful.Response) { res, err := status.GetClusterResourceStatus() - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } resp.WriteAsJson(res) } -func getNamespacesResourceStatus(req *restful.Request, resp *restful.Response) { +func GetNamespacesResourceStatus(req *restful.Request, resp *restful.Response) { res, err := status.GetNamespacesResourceStatus(req.PathParameter("namespace")) - if errors.HandlerError(err, resp) { + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } resp.WriteAsJson(res) diff --git a/pkg/apiserver/workspaces/workspaces.go b/pkg/apiserver/workspaces/workspaces.go index 9114d3ef1..d09231c12 100644 --- a/pkg/apiserver/workspaces/workspaces.go +++ b/pkg/apiserver/workspaces/workspaces.go @@ -16,491 +16,3 @@ */ package workspaces - -import ( - "net/http" - - "kubesphere.io/kubesphere/pkg/errors" - "kubesphere.io/kubesphere/pkg/models" - "kubesphere.io/kubesphere/pkg/models/metrics" - - "github.com/emicklei/go-restful" - "k8s.io/api/core/v1" - - "fmt" - "strings" - - "strconv" - - "regexp" - - "sort" - - "kubesphere.io/kubesphere/pkg/models/iam" - "kubesphere.io/kubesphere/pkg/models/workspaces" -) - -const UserNameHeader = "X-Token-Username" - -func V1Alpha2(ws *restful.WebService) { - ws.Route(ws.GET("/workspaces").To(UserWorkspaceListHandler)) - ws.Route(ws.POST("/workspaces").To(WorkspaceCreateHandler)) - ws.Route(ws.DELETE("/workspaces/{name}").To(DeleteWorkspaceHandler)) - ws.Route(ws.GET("/workspaces/{name}").To(WorkspaceDetailHandler)) - 
ws.Route(ws.PUT("/workspaces/{name}").To(WorkspaceEditHandler)) - ws.Route(ws.GET("/workspaces/{workspace}/namespaces").To(UserNamespaceListHandler)) - ws.Route(ws.GET("/workspaces/{workspace}/members/{username}/namespaces").To(UserNamespaceListHandler)) - ws.Route(ws.POST("/workspaces/{name}/namespaces").To(NamespaceCreateHandler)) - ws.Route(ws.DELETE("/workspaces/{name}/namespaces/{namespace}").To(NamespaceDeleteHandler)) - ws.Route(ws.GET("/workspaces/{name}/namespaces/{namespace}").To(NamespaceCheckHandler)) - ws.Route(ws.GET("/namespaces/{namespace}").To(NamespaceCheckHandler)) - ws.Route(ws.GET("/workspaces/{name}/devops").To(DevOpsProjectHandler)) - ws.Route(ws.GET("/workspaces/{name}/members/{username}/devops").To(DevOpsProjectHandler)) - ws.Route(ws.POST("/workspaces/{name}/devops").To(DevOpsProjectCreateHandler)) - ws.Route(ws.DELETE("/workspaces/{name}/devops/{id}").To(DevOpsProjectDeleteHandler)) - - ws.Route(ws.GET("/workspaces/{name}/members").To(MembersHandler)) - ws.Route(ws.GET("/workspaces/{name}/members/{member}").To(MemberHandler)) - ws.Route(ws.GET("/workspaces/{name}/roles").To(RolesHandler)) - // TODO /workspaces/{name}/roles/{role} - ws.Route(ws.POST("/workspaces/{name}/members").To(MembersInviteHandler)) - ws.Route(ws.DELETE("/workspaces/{name}/members").To(MembersRemoveHandler)) -} - -func RolesHandler(req *restful.Request, resp *restful.Response) { - - name := req.PathParameter("name") - - workspace, err := workspaces.Detail(name) - - if errors.HandlerError(err, resp) { - return - } - - roles, err := workspaces.Roles(workspace) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(roles) -} - -func MembersHandler(req *restful.Request, resp *restful.Response) { - workspace := req.PathParameter("name") - keyword := req.QueryParameter("keyword") - - users, err := workspaces.GetWorkspaceMembers(workspace, keyword) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(users) -} - -func MemberHandler(req 
*restful.Request, resp *restful.Response) { - workspace := req.PathParameter("name") - username := req.PathParameter("member") - - user, err := iam.GetUser(username) - if errors.HandlerError(err, resp) { - return - } - - namespaces, err := workspaces.Namespaces(workspace) - - if errors.HandlerError(err, resp) { - return - } - - user.WorkspaceRole = user.WorkspaceRoles[workspace] - - roles := make(map[string]string) - - for _, namespace := range namespaces { - if role := user.Roles[namespace.Name]; role != "" { - roles[namespace.Name] = role - } - } - - user.Roles = roles - user.Rules = nil - user.WorkspaceRules = nil - user.WorkspaceRoles = nil - user.ClusterRules = nil - resp.WriteAsJson(user) -} - -func MembersInviteHandler(req *restful.Request, resp *restful.Response) { - var users []workspaces.UserInvite - workspace := req.PathParameter("name") - err := req.ReadEntity(&users) - - if errors.HandlerError(err, resp) { - return - } - - err = workspaces.Invite(workspace, users) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(errors.None) -} - -func MembersRemoveHandler(req *restful.Request, resp *restful.Response) { - query := req.QueryParameter("name") - workspace := req.PathParameter("name") - - names := strings.Split(query, ",") - - err := workspaces.RemoveMembers(workspace, names) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(errors.None) -} - -func NamespaceCheckHandler(req *restful.Request, resp *restful.Response) { - namespace := req.PathParameter("namespace") - - exist, err := workspaces.NamespaceExistCheck(namespace) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(map[string]bool{"exist": exist}) -} - -func NamespaceDeleteHandler(req *restful.Request, resp *restful.Response) { - namespace := req.PathParameter("namespace") - workspace := req.PathParameter("name") - - err := workspaces.DeleteNamespace(workspace, namespace) - - if errors.HandlerError(err, resp) { - return - } - - 
resp.WriteAsJson(errors.None) -} - -func DevOpsProjectDeleteHandler(req *restful.Request, resp *restful.Response) { - devops := req.PathParameter("id") - workspace := req.PathParameter("name") - force := req.QueryParameter("force") - username := req.HeaderParameter(UserNameHeader) - - err := workspaces.UnBindDevopsProject(workspace, devops) - - if err != nil && force != "true" { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.New(errors.Internal, err.Error())) - return - } - - err = workspaces.DeleteDevopsProject(username, devops) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(errors.None) -} - -func DevOpsProjectCreateHandler(req *restful.Request, resp *restful.Response) { - - workspace := req.PathParameter("name") - username := req.HeaderParameter(UserNameHeader) - - var devops workspaces.DevopsProject - - err := req.ReadEntity(&devops) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(errors.InvalidArgument, err.Error())) - return - } - - project, err := workspaces.CreateDevopsProject(username, workspace, devops) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(project) - -} - -func NamespaceCreateHandler(req *restful.Request, resp *restful.Response) { - workspace := req.PathParameter("name") - username := req.HeaderParameter(UserNameHeader) - - namespace := &v1.Namespace{} - - err := req.ReadEntity(namespace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(errors.InvalidArgument, err.Error())) - return - } - - if namespace.Annotations == nil { - namespace.Annotations = make(map[string]string, 0) - } - - namespace.Annotations["creator"] = username - namespace.Annotations["workspace"] = workspace - - if namespace.Labels == nil { - namespace.Labels = make(map[string]string, 0) - } - - namespace.Labels["kubesphere.io/workspace"] = workspace - - namespace, err = workspaces.CreateNamespace(namespace) - - if err != nil { - 
resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(errors.InvalidArgument, err.Error())) - return - } - - resp.WriteAsJson(namespace) -} - -func DevOpsProjectHandler(req *restful.Request, resp *restful.Response) { - - workspace := req.PathParameter("name") - username := req.PathParameter("username") - keyword := req.QueryParameter("keyword") - - if username == "" { - username = req.HeaderParameter(UserNameHeader) - } - - limit := 65535 - offset := 0 - orderBy := "createTime" - reverse := true - - if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(req.QueryParameter("paging")); len(groups) == 3 { - limit, _ = strconv.Atoi(groups[1]) - page, _ := strconv.Atoi(groups[2]) - offset = (page - 1) * limit - } - - if groups := regexp.MustCompile(`^(createTime|name)$`).FindStringSubmatch(req.QueryParameter("order")); len(groups) == 2 { - orderBy = groups[1] - reverse = false - } - - if q := req.QueryParameter("reverse"); q != "" { - b, err := strconv.ParseBool(q) - if err == nil { - reverse = b - } - } - - total, devOpsProjects, err := workspaces.ListDevopsProjectsByUser(username, workspace, keyword, orderBy, reverse, limit, offset) - - if errors.HandlerError(err, resp) { - return - } - - result := models.PageableResponse{} - result.TotalCount = total - result.Items = make([]interface{}, 0) - for _, n := range devOpsProjects { - result.Items = append(result.Items, n) - } - resp.WriteAsJson(result) -} - -func WorkspaceCreateHandler(req *restful.Request, resp *restful.Response) { - var workspace workspaces.Workspace - username := req.HeaderParameter(UserNameHeader) - err := req.ReadEntity(&workspace) - if err != nil { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(errors.InvalidArgument, err.Error())) - return - } - if workspace.Name == "" || strings.Contains(workspace.Name, ":") { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(errors.InvalidArgument, "invalid workspace name")) - return - } - - workspace.Path = 
workspace.Name - workspace.Members = nil - - if workspace.Admin != "" { - workspace.Creator = workspace.Admin - } else { - workspace.Creator = username - } - - created, err := workspaces.Create(&workspace) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(created) - -} - -func DeleteWorkspaceHandler(req *restful.Request, resp *restful.Response) { - name := req.PathParameter("name") - - if name == "" || strings.Contains(name, ":") { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(errors.InvalidArgument, "invalid workspace name")) - return - } - - workspace, err := workspaces.Detail(name) - - if errors.HandlerError(err, resp) { - return - } - - err = workspaces.Delete(workspace) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(errors.None) -} -func WorkspaceEditHandler(req *restful.Request, resp *restful.Response) { - var workspace workspaces.Workspace - name := req.PathParameter("name") - err := req.ReadEntity(&workspace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(errors.InvalidArgument, err.Error())) - return - } - - if name != workspace.Name { - resp.WriteError(http.StatusBadRequest, fmt.Errorf("the name of workspace (%s) does not match the name on the URL (%s)", workspace.Name, name)) - return - } - - if workspace.Name == "" || strings.Contains(workspace.Name, ":") { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(errors.InvalidArgument, "invalid workspace name")) - return - } - - workspace.Path = workspace.Name - - workspace.Members = nil - - edited, err := workspaces.Edit(&workspace) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(edited) -} -func WorkspaceDetailHandler(req *restful.Request, resp *restful.Response) { - - name := req.PathParameter("name") - - workspace, err := workspaces.Detail(name) - - if errors.HandlerError(err, resp) { - return - } - - resp.WriteAsJson(workspace) -} - -// List all workspaces for the 
current user -func UserWorkspaceListHandler(req *restful.Request, resp *restful.Response) { - keyword := req.QueryParameter("keyword") - username := req.HeaderParameter(UserNameHeader) - - ws, err := workspaces.ListWorkspaceByUser(username, keyword) - - if errors.HandlerError(err, resp) { - return - } - - sort.Slice(ws, func(i, j int) bool { - t1, err := ws[i].GetCreateTime() - if err != nil { - return false - } - t2, err := ws[j].GetCreateTime() - if err != nil { - return true - } - return t1.After(t2) - }) - - resp.WriteAsJson(ws) -} - -func UserNamespaceListHandler(req *restful.Request, resp *restful.Response) { - withMetrics, err := strconv.ParseBool(req.QueryParameter("metrics")) - - if err != nil { - withMetrics = false - } - - username := req.PathParameter("username") - keyword := req.QueryParameter("keyword") - if username == "" { - username = req.HeaderParameter(UserNameHeader) - } - limit := 65535 - offset := 0 - orderBy := "createTime" - reverse := true - - if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(req.QueryParameter("paging")); len(groups) == 3 { - limit, _ = strconv.Atoi(groups[1]) - page, _ := strconv.Atoi(groups[2]) - if page < 0 { - page = 1 - } - offset = (page - 1) * limit - } - - if groups := regexp.MustCompile(`^(createTime|name)$`).FindStringSubmatch(req.QueryParameter("order")); len(groups) == 2 { - orderBy = groups[1] - reverse = false - } - - if q := req.QueryParameter("reverse"); q != "" { - b, err := strconv.ParseBool(q) - if err == nil { - reverse = b - } - } - - workspaceName := req.PathParameter("workspace") - - total, namespaces, err := workspaces.ListNamespaceByUser(workspaceName, username, keyword, orderBy, reverse, limit, offset) - - if withMetrics { - namespaces = metrics.GetNamespacesWithMetrics(namespaces) - } - - if errors.HandlerError(err, resp) { - return - } - - result := models.PageableResponse{} - result.TotalCount = total - result.Items = make([]interface{}, 0) - for _, n := range 
namespaces { - result.Items = append(result.Items, n) - } - - resp.WriteAsJson(result) -} diff --git a/pkg/client/dbclient.go b/pkg/client/dbclient.go index 92d6696b8..0015cb598 100644 --- a/pkg/client/dbclient.go +++ b/pkg/client/dbclient.go @@ -1,51 +1,56 @@ /* -Copyright 2018 The KubeSphere Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ */ package client import ( - "fmt" + "flag" + "log" + "sync" _ "github.com/go-sql-driver/mysql" - "github.com/golang/glog" "github.com/jinzhu/gorm" ) -var dbClient *gorm.DB +var ( + dbClientOnce sync.Once + dbClient *gorm.DB + dsn string +) -func NewDBClient() *gorm.DB { - conn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local", "", "", "", "") - - db, err := gorm.Open("mysql", conn) - - if err != nil { - glog.Error(err) - panic(err) - } - return db +func init() { + flag.StringVar(&dsn, "database-connection", "root@tcp(localhost:3306)/kubesphere?charset=utf8&parseTime=True", "data source name") } -func NewSharedDBClient() *gorm.DB { +func DBClient() *gorm.DB { + dbClientOnce.Do(func() { + var err error + dbClient, err = gorm.Open("mysql", dsn) - if dbClient != nil { - err := dbClient.DB().Ping() - if err == nil { - return dbClient - } else { - glog.Error(err) - panic(err) + if err != nil { + log.Fatalln(err) } - } - return NewDBClient() + if err := dbClient.DB().Ping(); err != nil { + log.Fatalln(err) + } + }) + + return dbClient + } diff --git a/pkg/client/k8sclient.go b/pkg/client/k8sclient.go index 62ce10f6f..416f2986c 100644 --- a/pkg/client/k8sclient.go +++ b/pkg/client/k8sclient.go @@ -19,25 +19,31 @@ package client import ( + "flag" "fmt" + "log" "os" "sync" + "k8s.io/client-go/tools/clientcmd" + "github.com/mitchellh/go-homedir" - "github.com/golang/glog" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" ) var ( - KubeConfigFile string + kubeConfigFile string k8sClient *kubernetes.Clientset k8sClientOnce sync.Once KubeConfig *rest.Config ) +func init() { + flag.StringVar(&kubeConfigFile, "kubeconfig", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME")), "path to kubeconfig file") +} + func K8sClient() *kubernetes.Clientset { k8sClientOnce.Do(func() { @@ -45,14 +51,10 @@ func K8sClient() *kubernetes.Clientset { config, err := getKubeConfig() if err != nil { - glog.Fatalf("cannot load 
kubeconfig: %v", err) + log.Fatalln(err) } - k8sClient, err = kubernetes.NewForConfig(config) - - if err != nil { - glog.Fatalf("cannot create k8s client: %v", err) - } + k8sClient = kubernetes.NewForConfigOrDie(config) KubeConfig = config }) @@ -62,36 +64,28 @@ func K8sClient() *kubernetes.Clientset { func getKubeConfig() (kubeConfig *rest.Config, err error) { - if KubeConfigFile == "" { + if kubeConfigFile == "" { if env := os.Getenv("KUBECONFIG"); env != "" { - KubeConfigFile = env + kubeConfigFile = env } else { if home, err := homedir.Dir(); err == nil { - KubeConfigFile = fmt.Sprintf("%s/.kube/config", home) + kubeConfigFile = fmt.Sprintf("%s/.kube/config", home) } } } - if KubeConfigFile != "" { - - kubeConfig, err = clientcmd.BuildConfigFromFlags("", KubeConfigFile) - - if err != nil { - return nil, err - } - + if _, err = os.Stat(kubeConfigFile); err == nil { + kubeConfig, err = clientcmd.BuildConfigFromFlags("", kubeConfigFile) } else { - kubeConfig, err = rest.InClusterConfig() + } - if err != nil { - return nil, err - } + if err != nil { + return nil, err } kubeConfig.QPS = 1e6 kubeConfig.Burst = 1e6 return kubeConfig, nil - } diff --git a/pkg/client/ldap/channel.go b/pkg/client/ldap/channel.go new file mode 100644 index 000000000..fad2e0b09 --- /dev/null +++ b/pkg/client/ldap/channel.go @@ -0,0 +1,188 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package ldap + +import ( + "errors" + "log" + "sync" + + "github.com/go-ldap/ldap" +) + +// channelPool implements the Pool interface based on buffered channels. +type channelPool struct { + // storage for our net.Conn connections + mu sync.Mutex + conns chan ldap.Client + + name string + aliveChecks bool + + // net.Conn generator + factory PoolFactory + closeAt []uint16 +} + +// PoolFactory is a function to create new connections. +type PoolFactory func(string) (ldap.Client, error) + +// NewChannelPool returns a new pool based on buffered channels with an initial +// capacity and maximum capacity. Factory is used when initial capacity is +// greater than zero to fill the pool. A zero initialCap doesn't fill the Pool +// until a new Get() is called. During a Get(), If there is no new connection +// available in the pool, a new connection will be created via the Factory() +// method. +// +// closeAt will automagically mark the connection as unusable if the return code +// of the call is one of those passed, most likely you want to set this to something +// like +// []uint8{ldap.LDAPResultTimeLimitExceeded, ldap.ErrorNetwork} +func NewChannelPool(initialCap, maxCap int, name string, factory PoolFactory, closeAt []uint16) (Pool, error) { + if initialCap < 0 || maxCap <= 0 || initialCap > maxCap { + return nil, errors.New("invalid capacity settings") + } + + c := &channelPool{ + conns: make(chan ldap.Client, maxCap), + name: name, + factory: factory, + closeAt: closeAt, + aliveChecks: true, + } + + // create initial connections, if something goes wrong, + // just close the pool error out. 
+ for i := 0; i < initialCap; i++ { + conn, err := factory(c.name) + if err != nil { + c.Close() + return nil, errors.New("factory is not able to fill the pool: " + err.Error()) + } + c.conns <- conn + } + + return c, nil +} + +func (c *channelPool) AliveChecks(on bool) { + c.mu.Lock() + c.aliveChecks = on + c.mu.Unlock() +} + +func (c *channelPool) getConns() chan ldap.Client { + c.mu.Lock() + conns := c.conns + c.mu.Unlock() + return conns +} + +// Get implements the Pool interfaces Get() method. If there is no new +// connection available in the pool, a new connection will be created via the +// Factory() method. +func (c *channelPool) Get() (*PoolConn, error) { + conns := c.getConns() + if conns == nil { + return nil, ErrClosed + } + + // wrap our connections with our ldap.Client implementation (wrapConn + // method) that puts the connection back to the pool if it's closed. + select { + case conn := <-conns: + if conn == nil { + return nil, ErrClosed + } + if !c.aliveChecks || isAlive(conn) { + return c.wrapConn(conn, c.closeAt), nil + } + conn.Close() + return c.NewConn() + default: + return c.NewConn() + } +} + +func isAlive(conn ldap.Client) bool { + _, err := conn.Search(&ldap.SearchRequest{BaseDN: "", Scope: ldap.ScopeBaseObject, Filter: "(&)", Attributes: []string{"1.1"}}) + return err == nil +} + +func (c *channelPool) NewConn() (*PoolConn, error) { + conn, err := c.factory(c.name) + if err != nil { + return nil, err + } + return c.wrapConn(conn, c.closeAt), nil +} + +// put puts the connection back to the pool. If the pool is full or closed, +// conn is simply closed. A nil conn will be rejected. +func (c *channelPool) put(conn ldap.Client) { + if conn == nil { + log.Printf("connection is nil. rejecting") + return + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.conns == nil { + // pool is closed, close passed connection + conn.Close() + return + } + + // put the resource back into the pool. 
If the pool is full, the send + // cannot proceed and the default case is executed instead. + select { + case c.conns <- conn: + return + default: + // pool is full, close passed connection + conn.Close() + return + } +} + +func (c *channelPool) Close() { + c.mu.Lock() + conns := c.conns + c.conns = nil + c.factory = nil + c.mu.Unlock() + + if conns == nil { + return + } + + close(conns) + for conn := range conns { + conn.Close() + } + return +} + +func (c *channelPool) Len() int { return len(c.getConns()) } + +func (c *channelPool) wrapConn(conn ldap.Client, closeAt []uint16) *PoolConn { + p := &PoolConn{c: c, closeAt: closeAt} + p.Conn = conn + return p +} diff --git a/pkg/client/ldap/conn.go b/pkg/client/ldap/conn.go new file mode 100644 index 000000000..4d6dc3e83 --- /dev/null +++ b/pkg/client/ldap/conn.go @@ -0,0 +1,113 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package ldap + +import ( + "crypto/tls" + "log" + "time" + + "github.com/go-ldap/ldap" +) + +// PoolConn implements Client to override the Close() method +type PoolConn struct { + Conn ldap.Client + c *channelPool + unusable bool + closeAt []uint16 +} + +func (p *PoolConn) Start() { + p.Conn.Start() +} + +func (p *PoolConn) StartTLS(config *tls.Config) error { + // FIXME - check if already TLS and then ignore? + return p.Conn.StartTLS(config) +} + +// Close() puts the given connection back to the pool instead of closing it.
+func (p *PoolConn) Close() { + if p.unusable { + log.Printf("Closing unusable connection") + if p.Conn != nil { + p.Conn.Close() + } + return + } + p.c.put(p.Conn) +} + +func (p *PoolConn) SimpleBind(simpleBindRequest *ldap.SimpleBindRequest) (*ldap.SimpleBindResult, error) { + return p.Conn.SimpleBind(simpleBindRequest) +} + +func (p *PoolConn) Bind(username, password string) error { + return p.Conn.Bind(username, password) +} + +func (p *PoolConn) ModifyDN(modifyDNRequest *ldap.ModifyDNRequest) error { + return p.Conn.ModifyDN(modifyDNRequest) +} + +// MarkUnusable() marks the connection not usable any more, to let the pool close it +// instead of returning it to pool. +func (p *PoolConn) MarkUnusable() { + p.unusable = true +} + +func (p *PoolConn) autoClose(err error) { + for _, code := range p.closeAt { + if ldap.IsErrorWithCode(err, code) { + p.MarkUnusable() + return + } + } +} + +func (p *PoolConn) SetTimeout(t time.Duration) { + p.Conn.SetTimeout(t) +} + +func (p *PoolConn) Add(addRequest *ldap.AddRequest) error { + return p.Conn.Add(addRequest) +} + +func (p *PoolConn) Del(delRequest *ldap.DelRequest) error { + return p.Conn.Del(delRequest) +} + +func (p *PoolConn) Modify(modifyRequest *ldap.ModifyRequest) error { + return p.Conn.Modify(modifyRequest) +} + +func (p *PoolConn) Compare(dn, attribute, value string) (bool, error) { + return p.Conn.Compare(dn, attribute, value) +} + +func (p *PoolConn) PasswordModify(passwordModifyRequest *ldap.PasswordModifyRequest) (*ldap.PasswordModifyResult, error) { + return p.Conn.PasswordModify(passwordModifyRequest) +} + +func (p *PoolConn) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) { + return p.Conn.Search(searchRequest) +} +func (p *PoolConn) SearchWithPaging(searchRequest *ldap.SearchRequest, pagingSize uint32) (*ldap.SearchResult, error) { + return p.Conn.SearchWithPaging(searchRequest, pagingSize) +} diff --git a/pkg/client/ldap/pool.go b/pkg/client/ldap/pool.go new file mode 100644 
index 000000000..be4715282 --- /dev/null +++ b/pkg/client/ldap/pool.go @@ -0,0 +1,43 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package ldap + +import ( + "errors" +) + +var ( + // ErrClosed is the error resulting if the pool is closed via pool.Close(). + ErrClosed = errors.New("pool is closed") +) + +// Pool interface describes a pool implementation. A pool should have maximum +// capacity. An ideal pool is threadsafe and easy to use. +type Pool interface { + // Get returns a new connection from the pool. Closing the connections puts + // it back to the Pool. Closing it when the pool is destroyed or full will + // be counted as an error. + Get() (*PoolConn, error) + + // Close closes the pool and all its connections. After Close() the pool is + // no longer usable. + Close() + + // Len returns the current number of connections of the pool. + Len() int +} diff --git a/pkg/client/ldapclient.go b/pkg/client/ldapclient.go new file mode 100644 index 000000000..8fa1b605c --- /dev/null +++ b/pkg/client/ldapclient.go @@ -0,0 +1,65 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package client + +import ( + "flag" + "fmt" + "github.com/go-ldap/ldap" + ldapPool "kubesphere.io/kubesphere/pkg/client/ldap" + "os" + "sync" +) + +var ( + once sync.Once + pool ldapPool.Pool + ldapHost string + ManagerDN string + ManagerPassword string + UserSearchBase string + GroupSearchBase string +) + +func init() { + flag.StringVar(&ldapHost, "ldap-server", "localhost:389", "ldap server host") + flag.StringVar(&ManagerDN, "ldap-manager-dn", "cn=admin,dc=example,dc=org", "ldap manager dn") + flag.StringVar(&ManagerPassword, "ldap-manager-password", "admin", "ldap manager password") + flag.StringVar(&UserSearchBase, "ldap-user-search-base", "ou=Users,dc=example,dc=org", "ldap user search base") + flag.StringVar(&GroupSearchBase, "ldap-group-search-base", "ou=Groups,dc=example,dc=org", "ldap group search base") +} + +func LdapClient() ldapPool.Pool { + + once.Do(func() { + var err error + pool, err = ldapPool.NewChannelPool(8, 96, "kubesphere", func(s string) (ldap.Client, error) { + conn, err := ldap.Dial("tcp", ldapHost) + if err != nil { + return nil, err + } + return conn, nil + }, []uint16{ldap.LDAPResultTimeLimitExceeded, ldap.ErrorNetwork}) + + if err != nil { + fmt.Fprint(os.Stderr, err.Error()) + panic(err) + } + }) + return pool +} diff --git a/pkg/client/prometheusclient.go b/pkg/client/prometheusclient.go index 1b23944df..0cca94e75 100644 --- a/pkg/client/prometheusclient.go +++ b/pkg/client/prometheusclient.go @@ -1,18 +1,24 @@ /* -Copyright 2018 The KubeSphere Authors. 
-Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ package client import ( + "flag" "io/ioutil" "net/http" "net/url" @@ -20,31 +26,23 @@ import ( "strings" "time" - "os" - "github.com/emicklei/go-restful" "github.com/golang/glog" ) const ( - DefaultScheme = "http" - DefaultPrometheusPort = "9090" - PrometheusApiPath = "/api/v1/" - DefaultQueryStep = "10m" - DefaultQueryTimeout = "10s" - RangeQueryType = "query_range?" - DefaultQueryType = "query?" - PrometheusAPIServerEnv = "PROMETHEUS_API_SERVER" + DefaultQueryStep = "10m" + DefaultQueryTimeout = "10s" + RangeQueryType = "query_range?" + DefaultQueryType = "query?" 
) -var PrometheusAPIServer = "prometheus-k8s.kubesphere-monitoring-system.svc" -var PrometheusEndpointUrl string +var ( + prometheusAPIEndpoint string +) func init() { - if env := os.Getenv(PrometheusAPIServerEnv); env != "" { - PrometheusAPIServer = env - } - PrometheusEndpointUrl = DefaultScheme + "://" + PrometheusAPIServer + ":" + DefaultPrometheusPort + PrometheusApiPath + flag.StringVar(&prometheusAPIEndpoint, "prometheus-endpoint", "http://prometheus-k8s.kubesphere-monitoring-system.svc:9090/api/v1/", "prometheus api endpoint") } type MonitoringRequestParams struct { @@ -72,11 +70,10 @@ type MonitoringRequestParams struct { WorkloadKind string } -var client = &http.Client{} - func SendMonitoringRequest(queryType string, params string) string { - epurl := PrometheusEndpointUrl + queryType + params - response, err := client.Get(epurl) + epurl := prometheusAPIEndpoint + queryType + params + + response, err := http.DefaultClient.Get(epurl) if err != nil { glog.Error(err) } else { @@ -116,11 +113,11 @@ func ParseMonitoringRequestParams(request *restful.Request) *MonitoringRequestPa metricsName := strings.Trim(request.QueryParameter("metrics_name"), " ") workloadName := strings.Trim(request.QueryParameter("workload_name"), " ") - nodeId := strings.Trim(request.PathParameter("node_id"), " ") - wsName := strings.Trim(request.PathParameter("workspace_name"), " ") - nsName := strings.Trim(request.PathParameter("ns_name"), " ") - podName := strings.Trim(request.PathParameter("pod_name"), " ") - containerName := strings.Trim(request.PathParameter("container_name"), " ") + nodeId := strings.Trim(request.PathParameter("node"), " ") + wsName := strings.Trim(request.PathParameter("workspace"), " ") + nsName := strings.Trim(request.PathParameter("namespace"), " ") + podName := strings.Trim(request.PathParameter("pod"), " ") + containerName := strings.Trim(request.PathParameter("container"), " ") workloadKind := strings.Trim(request.PathParameter("workload_kind"), " ") var 
requestParams = MonitoringRequestParams{ diff --git a/pkg/client/redis.go b/pkg/client/redis.go new file mode 100644 index 000000000..f7cfd2e9d --- /dev/null +++ b/pkg/client/redis.go @@ -0,0 +1,56 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package client + +import ( + "flag" + "log" + "sync" + + "github.com/go-redis/redis" +) + +var ( + redisHost string + redisPassword string + redisDB int + redisClientOnce sync.Once + redisClient *redis.Client +) + +func init() { + flag.StringVar(&redisHost, "redis-server", "localhost:6379", "redis server host") + flag.StringVar(&redisPassword, "redis-password", "", "redis password") + flag.IntVar(&redisDB, "redis-db", 0, "redis db") +} + +func RedisClient() *redis.Client { + + redisClientOnce.Do(func() { + redisClient = redis.NewClient(&redis.Options{ + Addr: redisHost, + Password: redisPassword, + DB: redisDB, + }) + if err := redisClient.Ping().Err(); err != nil { + log.Fatalln(err) + } + }) + + return redisClient +} diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 3581c30c2..3a3915486 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -1,6 +1,21 @@ -package constants +/* -import "os" + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package constants const ( APIVersion = "v1alpha1" @@ -24,32 +39,18 @@ const ( DevopsOwner = "owner" DevopsReporter = "reporter" - DevopsAPIServerEnv = "DEVOPS_API_SERVER" - AccountAPIServerEnv = "ACCOUNT_API_SERVER" - DevopsProxyTokenEnv = "DEVOPS_PROXY_TOKEN" - OpenPitrixProxyTokenEnv = "OPENPITRIX_PROXY_TOKEN" + envDevopsAPIServer = "DEVOPS_API_SERVER" + envAccountAPIServer = "ACCOUNT_API_SERVER" + envDevopsProxyToken = "DEVOPS_PROXY_TOKEN" + envOpenPitrixProxyToken = "OPENPITRIX_PROXY_TOKEN" + + UserNameHeader = "X-Token-Username" ) var ( - WorkSpaceRoles = []string{WorkspaceAdmin, WorkspaceRegular, WorkspaceViewer} - DevopsAPIServer = "ks-devops-apiserver.kubesphere-system.svc" - AccountAPIServer = "ks-account.kubesphere-system.svc" - DevopsProxyToken = "" - OpenPitrixProxyToken = "" - SystemNamespaces = []string{KubeSystemNamespace, OpenPitrixNamespace, KubeSystemNamespace} + WorkSpaceRoles = []string{WorkspaceAdmin, WorkspaceRegular, WorkspaceViewer} + SystemWorkspace = "system-workspace" + DevopsAPIServer = "ks-devops-apiserver.kubesphere-system.svc" + AccountAPIServer = "ks-account.kubesphere-system.svc" + SystemNamespaces = []string{KubeSystemNamespace, OpenPitrixNamespace, KubeSystemNamespace} ) - -func init() { - if env := os.Getenv(DevopsAPIServerEnv); env != "" { - DevopsAPIServer = env - } - if env := os.Getenv(AccountAPIServerEnv); env != "" { - AccountAPIServer = env - } - if env := os.Getenv(DevopsProxyTokenEnv); env != "" { - DevopsProxyToken = env - } - if env := os.Getenv(OpenPitrixProxyTokenEnv); env != "" { - OpenPitrixProxyToken = env - 
} -} diff --git a/pkg/errors/code.go b/pkg/errors/code.go deleted file mode 100644 index 3b9fbd74b..000000000 --- a/pkg/errors/code.go +++ /dev/null @@ -1,17 +0,0 @@ -package errors - -type Code int - -const ( - OK Code = iota - Canceled - Unknown - InvalidArgument - Internal // 5 - Unavailable - AlreadyExists - NotFound - NotImplement - VerifyFailed - Conflict -) diff --git a/pkg/errors/code_string.go b/pkg/errors/code_string.go deleted file mode 100644 index c29c37196..000000000 --- a/pkg/errors/code_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=Code"; DO NOT EDIT. - -package errors - -import "strconv" - -const _Code_name = "OKCanceledUnknownInvalidArgumentInternalUnavailableAlreadyExistsWTFNotFoundNotImplementVerifyFailed" - -var _Code_index = [...]uint8{0, 2, 10, 17, 32, 40, 51, 64, 67, 75, 87, 99} - -func (i Code) String() string { - if i < 0 || i >= Code(len(_Code_index)-1) { - return "Code(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Code_name[_Code_index[i]:_Code_index[i+1]] -} diff --git a/pkg/errors/code_test.go b/pkg/errors/code_test.go deleted file mode 100644 index 85565226f..000000000 --- a/pkg/errors/code_test.go +++ /dev/null @@ -1,7 +0,0 @@ -package errors - -import "testing" - -func TestCode_String(t *testing.T) { - t.Log(Code(1).String()) -} diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go index 417b3bb79..cf341903b 100644 --- a/pkg/errors/errors.go +++ b/pkg/errors/errors.go @@ -1,105 +1,42 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + +*/ package errors import ( "encoding/json" "errors" - "fmt" - "net" - "net/http" - "reflect" - - k8sError "k8s.io/apimachinery/pkg/api/errors" - - "github.com/emicklei/go-restful" - "github.com/go-sql-driver/mysql" - "github.com/golang/glog" ) type Error struct { - Code Code `json:"code"` Message string `json:"message"` } -var None = New(OK, "success") +var None = Error{Message: "success"} func (e *Error) Error() string { - return fmt.Sprintf("error: %v,message: %v", e.Code.String(), e.Message) -} -func (e *Error) HttpStatusCode() int { - switch e.Code { - case OK: - return http.StatusOK - case InvalidArgument: - return http.StatusBadRequest - case AlreadyExists: - return http.StatusConflict - case Unavailable: - return http.StatusServiceUnavailable - case NotImplement: - return http.StatusNotImplemented - case VerifyFailed: - return http.StatusBadRequest - case Conflict: - return http.StatusConflict - case Internal: - fallthrough - case Unknown: - fallthrough - default: - return http.StatusInternalServerError - } + return e.Message } -func New(code Code, message string) error { - if message == "" { - message = code.String() - } - return &Error{Code: code, Message: message} +func Wrap(err error) Error { + return Error{Message: err.Error()} } -func HandlerError(err error, resp *restful.Response) bool { - - if err == nil { - return false - } - - glog.Errorln(reflect.TypeOf(err), err) - - resp.WriteHeaderAndEntity(wrapper(err)) - - return true -} - -func wrapper(err error) (int, interface{}) { - switch err.(type) { - case *Error: - case *json.UnmarshalTypeError: - err = New(InvalidArgument, err.Error()) - case *mysql.MySQLError: - err = wrapperMysqlError(err.(*mysql.MySQLError)) - case *net.OpError: - err = New(Internal, err.Error()) - default: - if k8sError.IsNotFound(err) { - err = New(NotFound, err.Error()) - } else { - err = New(Unknown, err.Error()) - } - - } 
- return err.(*Error).HttpStatusCode(), err -} - -func wrapperMysqlError(sqlError *mysql.MySQLError) error { - switch sqlError.Number { - case 1062: - return New(AlreadyExists, sqlError.Message) - default: - return New(Unknown, sqlError.Message) - } -} - -func Wrap(data []byte) error { +func Parse(data []byte) error { var j map[string]string err := json.Unmarshal(data, &j) if err != nil { diff --git a/pkg/models/components/components.go b/pkg/models/components/components.go index eabb69143..af26140ce 100644 --- a/pkg/models/components/components.go +++ b/pkg/models/components/components.go @@ -18,47 +18,27 @@ package components import ( - "time" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "kubesphere.io/kubesphere/pkg/models" - lister "k8s.io/client-go/listers/core/v1" + "kubesphere.io/kubesphere/pkg/client" "kubesphere.io/kubesphere/pkg/informers" "github.com/golang/glog" - coreV1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "kubesphere.io/kubesphere/pkg/constants" ) -type Component struct { - Name string `json:"name"` - Namespace string `json:"namespace"` - SelfLink string `json:"selfLink"` - Label interface{} `json:"label"` - StartedAt time.Time `json:"startedAt"` - TotalBackends int `json:"totalBackends"` - HealthyBackends int `json:"healthyBackends"` -} - -var ( - componentStatusLister lister.ComponentStatusLister - serviceLister lister.ServiceLister - podLister lister.PodLister - nodeLister lister.NodeLister -) - -func init() { - componentStatusLister = informers.SharedInformerFactory().Core().V1().ComponentStatuses().Lister() - serviceLister = informers.SharedInformerFactory().Core().V1().Services().Lister() - podLister = informers.SharedInformerFactory().Core().V1().Pods().Lister() - nodeLister = informers.SharedInformerFactory().Core().V1().Nodes().Lister() -} - func GetComponentStatus(name string) (interface{}, error) { - var service *coreV1.Service + var service *corev1.Service var err error + + serviceLister 
:= informers.SharedInformerFactory().Core().V1().Services().Lister() + for _, ns := range constants.SystemNamespaces { service, err = serviceLister.Services(ns).Get(name) if err == nil { @@ -70,13 +50,15 @@ func GetComponentStatus(name string) (interface{}, error) { return nil, err } + podLister := informers.SharedInformerFactory().Core().V1().Pods().Lister() + pods, err := podLister.Pods(service.Namespace).List(labels.SelectorFromValidatedSet(service.Spec.Selector)) if err != nil { return nil, err } - component := Component{ + component := models.Component{ Name: service.Name, Namespace: service.Namespace, SelfLink: service.SelfLink, @@ -102,11 +84,11 @@ func GetSystemHealthStatus() (map[string]interface{}, error) { status := make(map[string]interface{}) - componentStatuses, err := componentStatusLister.List(labels.Everything()) + componentStatuses, err := client.K8sClient().CoreV1().ComponentStatuses().List(meta_v1.ListOptions{}) if err != nil { return nil, err } - for _, cs := range componentStatuses { + for _, cs := range componentStatuses.Items { status[cs.Name] = cs.Conditions[0] } @@ -119,6 +101,8 @@ func GetSystemHealthStatus() (map[string]interface{}, error) { for k, v := range systemComponentStatus { status[k] = v } + + nodeLister := informers.SharedInformerFactory().Core().V1().Nodes().Lister() // get node status nodes, err := nodeLister.List(labels.Everything()) if err != nil { @@ -132,7 +116,7 @@ func GetSystemHealthStatus() (map[string]interface{}, error) { for _, nodes := range nodes { totalNodes++ for _, condition := range nodes.Status.Conditions { - if condition.Type == coreV1.NodeReady && condition.Status == coreV1.ConditionTrue { + if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue { healthyNodes++ } } @@ -147,11 +131,12 @@ func GetSystemHealthStatus() (map[string]interface{}, error) { } func GetAllComponentsStatus() (map[string]interface{}, error) { + serviceLister := 
informers.SharedInformerFactory().Core().V1().Services().Lister() + podLister := informers.SharedInformerFactory().Core().V1().Pods().Lister() status := make(map[string]interface{}) var err error - for _, ns := range constants.SystemNamespaces { nsStatus := make(map[string]interface{}) @@ -164,7 +149,7 @@ func GetAllComponentsStatus() (map[string]interface{}, error) { } for _, service := range services { - component := Component{ + component := models.Component{ Name: service.Name, Namespace: service.Namespace, SelfLink: service.SelfLink, diff --git a/pkg/models/iam/am.go b/pkg/models/iam/am.go new file mode 100644 index 000000000..03631fc3e --- /dev/null +++ b/pkg/models/iam/am.go @@ -0,0 +1,822 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package iam + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "kubesphere.io/kubesphere/pkg/informers" + "log" + "net/http" + "regexp" + "strings" + + "github.com/go-ldap/ldap" + corev1 "k8s.io/api/core/v1" + "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/kubernetes/pkg/util/slice" + + "kubesphere.io/kubesphere/pkg/client" + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/models" + "kubesphere.io/kubesphere/pkg/models/iam/policy" +) + +func GetNamespaces(username string) ([]*corev1.Namespace, error) { + + roles, err := GetRoles(username, "") + + if err != nil { + return nil, err + } + + namespaces := make([]*corev1.Namespace, 0) + namespaceLister := informers.SharedInformerFactory().Core().V1().Namespaces().Lister() + for _, role := range roles { + namespace, err := namespaceLister.Get(role.Name) + if err != nil { + return nil, err + } + namespaces = append(namespaces, namespace) + } + + return namespaces, nil +} + +func GetNamespacesByWorkspace(workspace string) ([]*corev1.Namespace, error) { + namespaceLister := informers.SharedInformerFactory().Core().V1().Namespaces().Lister() + return namespaceLister.List(labels.SelectorFromSet(labels.Set{"kubesphere.io/workspace": workspace})) +} + +func GetDevopsRole(projectId string, username string) (string, error) { + + //Hard fix + if username == "admin" { + return "owner", nil + } + + req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/api/v1alpha/projects/%s/members", constants.DevopsAPIServer, projectId), nil) + + if err != nil { + return "", err + } + req.Header.Set(constants.UserNameHeader, username) + resp, err := http.DefaultClient.Do(req) + + if err != nil { + return "", err + } + + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return "", err + } + + if resp.StatusCode > 200 { + return "", errors.New(string(data)) + } + + var result 
[]map[string]string + + err = json.Unmarshal(data, &result) + + if err != nil { + return "", err + } + + for _, item := range result { + if item["username"] == username { + return item["role"], nil + } + } + + return "", nil +} + +func GetNamespace(namespaceName string) (*corev1.Namespace, error) { + namespaceLister := informers.SharedInformerFactory().Core().V1().Namespaces().Lister() + return namespaceLister.Get(namespaceName) +} + +func GetRoles(username string, namespace string) ([]*v1.Role, error) { + clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() + roleBindingLister := informers.SharedInformerFactory().Rbac().V1().RoleBindings().Lister() + roleLister := informers.SharedInformerFactory().Rbac().V1().Roles().Lister() + roleBindings, err := roleBindingLister.RoleBindings(namespace).List(labels.Everything()) + + if err != nil { + return nil, err + } + + roles := make([]*v1.Role, 0) + + for _, roleBinding := range roleBindings { + + for _, subject := range roleBinding.Subjects { + if subject.Kind == v1.UserKind && subject.Name == username { + if roleBinding.RoleRef.Kind == ClusterRoleKind { + clusterRole, err := clusterRoleLister.Get(roleBinding.RoleRef.Name) + if err == nil { + var role = v1.Role{TypeMeta: (*clusterRole).TypeMeta, ObjectMeta: (*clusterRole).ObjectMeta, Rules: (*clusterRole).Rules} + role.Namespace = roleBinding.Namespace + roles = append(roles, &role) + break + } else if apierrors.IsNotFound(err) { + log.Println(err) + break + } else { + return nil, err + } + } else { + if subject.Kind == v1.UserKind && subject.Name == username { + rule, err := roleLister.Roles(roleBinding.Namespace).Get(roleBinding.RoleRef.Name) + if err == nil { + roles = append(roles, rule) + break + } else if apierrors.IsNotFound(err) { + log.Println(err) + break + } else { + return nil, err + } + + } + } + } + } + } + + return roles, nil +} + +func GetClusterRoles(username string) ([]*v1.ClusterRole, error) { + clusterRoleLister := 
informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() + clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() + clusterRoleBindings, err := clusterRoleBindingLister.List(labels.Everything()) + + if err != nil { + return nil, err + } + + roles := make([]*v1.ClusterRole, 0) + + for _, rb := range clusterRoleBindings { + if rb.RoleRef.Kind == ClusterRoleKind { + for _, subject := range rb.Subjects { + if subject.Kind == v1.UserKind && subject.Name == username { + + role, err := clusterRoleLister.Get(rb.RoleRef.Name) + role = role.DeepCopy() + if err == nil { + if role.Annotations == nil { + role.Annotations = make(map[string]string, 0) + } + + role.Annotations["rbac.authorization.k8s.io/clusterrolebinding"] = rb.Name + + if rb.Annotations != nil && + rb.Annotations["rbac.authorization.k8s.io/clusterrole"] == rb.RoleRef.Name { + role.Annotations["rbac.authorization.k8s.io/clusterrole"] = "true" + } + + roles = append(roles, role) + break + } else if apierrors.IsNotFound(err) { + log.Println(err) + break + } else { + return nil, err + } + } + } + } + } + + return roles, nil +} + +func GetRoleBindings(namespace string, roleName string) ([]*v1.RoleBinding, error) { + roleBindingLister := informers.SharedInformerFactory().Rbac().V1().RoleBindings().Lister() + roleBindingList, err := roleBindingLister.List(labels.Everything()) + + if err != nil { + return nil, err + } + + items := make([]*v1.RoleBinding, 0) + + for _, roleBinding := range roleBindingList { + if roleName == "" { + items = append(items, roleBinding) + } else if roleBinding.RoleRef.Name == roleName { + items = append(items, roleBinding) + } + } + + return items, nil +} + +func GetClusterRoleBindings(clusterRoleName string) ([]*v1.ClusterRoleBinding, error) { + clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() + roleBindingList, err := clusterRoleBindingLister.List(labels.Everything()) + + 
if err != nil { + return nil, err + } + + items := make([]*v1.ClusterRoleBinding, 0) + + for _, roleBinding := range roleBindingList { + if roleBinding.RoleRef.Name == clusterRoleName { + items = append(items, roleBinding) + } + } + + return items, nil +} + +func ClusterRoleUsers(clusterRoleName string) ([]*models.User, error) { + + roleBindings, err := GetClusterRoleBindings(clusterRoleName) + + if err != nil { + return nil, err + } + + conn, err := NewConnection() + + if err != nil { + return nil, err + } + + defer conn.Close() + + names := make([]string, 0) + users := make([]*models.User, 0) + for _, roleBinding := range roleBindings { + for _, subject := range roleBinding.Subjects { + if subject.Kind == v1.UserKind && !strings.HasPrefix(subject.Name, "system") && + !slice.ContainsString(names, subject.Name, nil) { + names = append(names, subject.Name) + + user, err := UserDetail(subject.Name, conn) + + if ldap.IsErrorWithCode(err, 32) { + continue + } + + if err != nil { + return nil, err + } + + users = append(users, user) + } + } + } + + return users, nil + +} + +func RoleUsers(namespace string, roleName string) ([]*models.User, error) { + roleBindings, err := GetRoleBindings(namespace, roleName) + + if err != nil { + return nil, err + } + + conn, err := NewConnection() + + if err != nil { + return nil, err + } + + defer conn.Close() + + names := make([]string, 0) + users := make([]*models.User, 0) + for _, roleBinding := range roleBindings { + for _, subject := range roleBinding.Subjects { + if subject.Kind == v1.UserKind && + !strings.HasPrefix(subject.Name, "system") && + !slice.ContainsString(names, subject.Name, nil) { + names = append(names, subject.Name) + user, err := UserDetail(subject.Name, conn) + if ldap.IsErrorWithCode(err, 32) { + continue + } + + if err != nil { + return nil, err + } + + users = append(users, user) + } + } + } + return users, nil +} + +func NamespaceUsers(namespaceName string) ([]*models.User, error) { + roleBindings, err := 
GetRoleBindings(namespaceName, "") + if err != nil { + return nil, err + } + conn, err := NewConnection() + + if err != nil { + return nil, err + } + + defer conn.Close() + + names := make([]string, 0) + users := make([]*models.User, 0) + + for _, roleBinding := range roleBindings { + + for _, subject := range roleBinding.Subjects { + if subject.Kind == v1.UserKind && + !slice.ContainsString(names, subject.Name, nil) && + !strings.HasPrefix(subject.Name, "system") { + if roleBinding.Name == "viewer" { + continue + } + if roleBinding.Name == "admin" { + continue + } + names = append(names, subject.Name) + user, err := UserDetail(subject.Name, conn) + if ldap.IsErrorWithCode(err, 32) { + continue + } + if err != nil { + return nil, err + } + user.Role = roleBinding.RoleRef.Name + user.RoleBinding = roleBinding.Name + users = append(users, user) + } + } + } + + return users, nil +} + +func GetWorkspaceRoles(clusterRoles []*v1.ClusterRole) map[string]string { + + workspaceRoles := make(map[string]string, 0) + + for _, v := range clusterRoles { + if groups := regexp.MustCompile(fmt.Sprintf(`^system:(\S+):(%s)$`, strings.Join(constants.WorkSpaceRoles, "|"))).FindStringSubmatch(v.Name); len(groups) == 3 { + workspaceRoles[groups[1]] = groups[2] + } + } + + return workspaceRoles +} + +func GetWorkspaceRole(clusterRoles []*v1.ClusterRole, workspace string) string { + + for _, v := range clusterRoles { + if groups := regexp.MustCompile(fmt.Sprintf(`^system:(\S+):(%s)$`, strings.Join(constants.WorkSpaceRoles, "|"))).FindStringSubmatch(v.Name); len(groups) == 3 { + if groups[1] == workspace { + return groups[2] + } + } + } + + return "" +} + +func GetWorkspaceSimpleRules(clusterRoles []*v1.ClusterRole, workspace string) map[string][]models.SimpleRule { + + workspaceRules := make(map[string][]models.SimpleRule, 0) + + clusterSimpleRules := make([]models.SimpleRule, 0) + clusterRules := make([]v1.PolicyRule, 0) + for _, clusterRole := range clusterRoles { + clusterRules = 
append(clusterRules, clusterRole.Rules...) + } + + for i := 0; i < len(policy.WorkspaceRoleRuleMapping); i++ { + rule := models.SimpleRule{Name: policy.WorkspaceRoleRuleMapping[i].Name} + rule.Actions = make([]string, 0) + for j := 0; j < (len(policy.WorkspaceRoleRuleMapping[i].Actions)); j++ { + if RulesMatchesAction(clusterRules, policy.WorkspaceRoleRuleMapping[i].Actions[j]) { + rule.Actions = append(rule.Actions, policy.WorkspaceRoleRuleMapping[i].Actions[j].Name) + } + } + if len(rule.Actions) > 0 { + clusterSimpleRules = append(clusterSimpleRules, rule) + } + } + + if len(clusterRules) > 0 { + workspaceRules["*"] = clusterSimpleRules + } + + for _, v := range clusterRoles { + + if groups := regexp.MustCompile(fmt.Sprintf(`^system:(\S+):(%s)$`, strings.Join(constants.WorkSpaceRoles, "|"))).FindStringSubmatch(v.Name); len(groups) == 3 { + + if workspace != "" && groups[1] != workspace { + continue + } + + policyRules := make([]v1.PolicyRule, 0) + + for _, rule := range v.Rules { + rule.ResourceNames = nil + policyRules = append(policyRules, rule) + } + + rules := make([]models.SimpleRule, 0) + + for i := 0; i < len(policy.WorkspaceRoleRuleMapping); i++ { + rule := models.SimpleRule{Name: policy.WorkspaceRoleRuleMapping[i].Name} + rule.Actions = make([]string, 0) + for j := 0; j < (len(policy.WorkspaceRoleRuleMapping[i].Actions)); j++ { + action := policy.WorkspaceRoleRuleMapping[i].Actions[j] + if RulesMatchesAction(policyRules, action) { + rule.Actions = append(rule.Actions, action.Name) + } + } + if len(rule.Actions) > 0 { + rules = append(rules, rule) + } + } + + workspaceRules[groups[1]] = merge(rules, clusterSimpleRules) + } + } + + return workspaceRules +} + +func merge(clusterRules, rules []models.SimpleRule) []models.SimpleRule { + for _, clusterRule := range clusterRules { + exist := false + + for i := 0; i < len(rules); i++ { + if rules[i].Name == clusterRule.Name { + exist = true + + for _, action := range clusterRule.Actions { + if 
!slice.ContainsString(rules[i].Actions, action, nil) { + rules[i].Actions = append(rules[i].Actions, action) + } + } + } + } + + if !exist { + rules = append(rules, clusterRule) + } + } + return rules +} + +// Convert cluster roles to rules +func GetClusterRoleSimpleRules(clusterRoles []*v1.ClusterRole) ([]models.SimpleRule, error) { + + clusterRules := make([]v1.PolicyRule, 0) + + for _, v := range clusterRoles { + clusterRules = append(clusterRules, v.Rules...) + } + + rules := make([]models.SimpleRule, 0) + + for i := 0; i < len(policy.ClusterRoleRuleMapping); i++ { + validActions := make([]string, 0) + for j := 0; j < (len(policy.ClusterRoleRuleMapping[i].Actions)); j++ { + if RulesMatchesAction(clusterRules, policy.ClusterRoleRuleMapping[i].Actions[j]) { + validActions = append(validActions, policy.ClusterRoleRuleMapping[i].Actions[j].Name) + } + } + if len(validActions) > 0 { + rules = append(rules, models.SimpleRule{Name: policy.ClusterRoleRuleMapping[i].Name, Actions: validActions}) + } + } + + return rules, nil +} + +// Convert roles to rules +func GetRoleSimpleRules(roles []*v1.Role, namespace string) (map[string][]models.SimpleRule, error) { + + rulesMapping := make(map[string][]models.SimpleRule, 0) + + policyRulesMapping := make(map[string][]v1.PolicyRule, 0) + + for _, v := range roles { + + if namespace != "" && v.Namespace != namespace { + continue + } + + policyRules := policyRulesMapping[v.Namespace] + + if policyRules == nil { + policyRules = make([]v1.PolicyRule, 0) + } + + policyRules = append(policyRules, v.Rules...) 
+ + policyRulesMapping[v.Namespace] = policyRules + } + + for namespace, policyRules := range policyRulesMapping { + + rules := make([]models.SimpleRule, 0) + + for i := 0; i < len(policy.RoleRuleMapping); i++ { + rule := models.SimpleRule{Name: policy.RoleRuleMapping[i].Name} + rule.Actions = make([]string, 0) + for j := 0; j < len(policy.RoleRuleMapping[i].Actions); j++ { + if RulesMatchesAction(policyRules, policy.RoleRuleMapping[i].Actions[j]) { + rule.Actions = append(rule.Actions, policy.RoleRuleMapping[i].Actions[j].Name) + } + } + if len(rule.Actions) > 0 { + rules = append(rules, rule) + } + } + + rulesMapping[namespace] = rules + } + + return rulesMapping, nil +} + +func CreateClusterRoleBinding(username string, clusterRoleName string) error { + clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() + _, err := clusterRoleLister.Get(clusterRoleName) + + if err != nil { + return err + } + + clusterRoles, err := GetClusterRoles(username) + + if err != nil { + return err + } + + for _, clusterRole := range clusterRoles { + + if clusterRole.Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { + + if clusterRole.Name == clusterRoleName { + return nil + } + + clusterRoleBindingName := clusterRole.Annotations["rbac.authorization.k8s.io/clusterrolebinding"] + clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() + clusterRoleBinding, err := clusterRoleBindingLister.Get(clusterRoleBindingName) + + if err != nil { + return err + } + + for i, v := range clusterRoleBinding.Subjects { + if v.Kind == v1.UserKind && v.Name == username { + clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects[:i], clusterRoleBinding.Subjects[i+1:]...) 
+ break + } + } + + _, err = client.K8sClient().RbacV1().ClusterRoleBindings().Update(clusterRoleBinding) + + if err != nil { + return err + } + + break + } + } + clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() + clusterRoleBindings, err := clusterRoleBindingLister.List(labels.Everything()) + + if err != nil { + return err + } + + var clusterRoleBinding *v1.ClusterRoleBinding + + for _, roleBinding := range clusterRoleBindings { + if roleBinding.Annotations != nil && roleBinding.Annotations["rbac.authorization.k8s.io/clusterrole"] == clusterRoleName && + roleBinding.RoleRef.Name == clusterRoleName { + clusterRoleBinding = roleBinding + break + } + } + + if clusterRoleBinding != nil { + clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, v1.Subject{Kind: v1.UserKind, Name: username}) + _, err := client.K8sClient().RbacV1().ClusterRoleBindings().Update(clusterRoleBinding) + if err != nil { + return err + } + } else { + clusterRoleBinding = new(v1.ClusterRoleBinding) + clusterRoleBinding.Annotations = map[string]string{"rbac.authorization.k8s.io/clusterrole": clusterRoleName} + clusterRoleBinding.Name = clusterRoleName + clusterRoleBinding.RoleRef = v1.RoleRef{Name: clusterRoleName, Kind: ClusterRoleKind} + clusterRoleBinding.Subjects = []v1.Subject{{Kind: v1.UserKind, Name: username}} + + _, err = client.K8sClient().RbacV1().ClusterRoleBindings().Create(clusterRoleBinding) + + if err != nil { + return err + } + } + + return nil +} + +func GetRole(namespace string, roleName string) (*v1.Role, error) { + return informers.SharedInformerFactory().Rbac().V1().Roles().Lister().Roles(namespace).Get(roleName) +} +func GetClusterRole(clusterRoleName string) (*v1.ClusterRole, error) { + clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() + return clusterRoleLister.Get(clusterRoleName) +} + +func RulesMatchesAction(rules []v1.PolicyRule, action models.Action) bool { + + 
for _, required := range action.Rules { + if !rulesMatchesRequired(rules, required) { + return false + } + } + + return true +} + +func rulesMatchesRequired(rules []v1.PolicyRule, required v1.PolicyRule) bool { + for _, rule := range rules { + if ruleMatchesRequired(rule, required) { + return true + } + } + return false +} + +func ruleMatchesRequired(rule v1.PolicyRule, required v1.PolicyRule) bool { + + if len(required.NonResourceURLs) == 0 { + for _, apiGroup := range required.APIGroups { + for _, resource := range required.Resources { + resources := strings.Split(resource, "/") + resource = resources[0] + var subsource string + if len(resources) > 1 { + subsource = resources[1] + } + + if len(required.ResourceNames) == 0 { + for _, verb := range required.Verbs { + if !ruleMatchesRequest(rule, apiGroup, "", resource, subsource, "", verb) { + return false + } + } + } else { + for _, resourceName := range required.ResourceNames { + for _, verb := range required.Verbs { + if !ruleMatchesRequest(rule, apiGroup, "", resource, subsource, resourceName, verb) { + return false + } + } + } + } + } + } + } else { + for _, apiGroup := range required.APIGroups { + for _, nonResourceURL := range required.NonResourceURLs { + for _, verb := range required.Verbs { + if !ruleMatchesRequest(rule, apiGroup, nonResourceURL, "", "", "", verb) { + return false + } + } + } + } + } + return true +} + +func ruleMatchesResources(rule v1.PolicyRule, apiGroup string, resource string, subresource string, resourceName string) bool { + + if resource == "" { + return false + } + + if !hasString(rule.APIGroups, apiGroup) && !hasString(rule.APIGroups, v1.ResourceAll) { + return false + } + + if len(rule.ResourceNames) > 0 && !hasString(rule.ResourceNames, resourceName) { + return false + } + + combinedResource := resource + + if subresource != "" { + combinedResource = combinedResource + "/" + subresource + } + + for _, res := range rule.Resources { + + // match "*" + if res == v1.ResourceAll || 
res == combinedResource { + return true + } + + // match "*/subresource" + if len(subresource) > 0 && strings.HasPrefix(res, "*/") && subresource == strings.TrimLeft(res, "*/") { + return true + } + // match "resource/*" + if strings.HasSuffix(res, "/*") && resource == strings.TrimRight(res, "/*") { + return true + } + } + + return false +} + +func ruleMatchesRequest(rule v1.PolicyRule, apiGroup string, nonResourceURL string, resource string, subresource string, resourceName string, verb string) bool { + + if !hasString(rule.Verbs, verb) && !hasString(rule.Verbs, v1.VerbAll) { + return false + } + + if nonResourceURL == "" { + return ruleMatchesResources(rule, apiGroup, resource, subresource, resourceName) + } else { + return ruleMatchesNonResource(rule, nonResourceURL) + } +} + +func ruleMatchesNonResource(rule v1.PolicyRule, nonResourceURL string) bool { + + if nonResourceURL == "" { + return false + } + + for _, spec := range rule.NonResourceURLs { + if pathMatches(nonResourceURL, spec) { + return true + } + } + + return false +} + +func pathMatches(path, spec string) bool { + // Allow wildcard match + if spec == "*" { + return true + } + // Allow exact match + if spec == path { + return true + } + // Allow a trailing * subpath match + if strings.HasSuffix(spec, "*") && strings.HasPrefix(path, strings.TrimRight(spec, "*")) { + return true + } + return false +} + +func hasString(slice []string, value string) bool { + for _, s := range slice { + if s == value { + return true + } + } + return false +} diff --git a/pkg/models/iam/counter.go b/pkg/models/iam/counter.go new file mode 100644 index 000000000..e4ecfb814 --- /dev/null +++ b/pkg/models/iam/counter.go @@ -0,0 +1,54 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
// Counter is a small mutex-protected integer counter.
type Counter struct {
	value int
	m     *sync.Mutex
}

// NewCounter returns a Counter initialised to value. The mutex is a pointer
// field, so copies of the returned Counter share the same lock and remain
// safe to use concurrently.
func NewCounter(value int) Counter {
	c := Counter{m: &sync.Mutex{}}
	c.Set(value)
	return c
}

// Set replaces the current value.
func (c *Counter) Set(value int) {
	c.m.Lock()
	defer c.m.Unlock()
	c.value = value
}

// Add increments the counter by value.
func (c *Counter) Add(value int) {
	c.m.Lock()
	defer c.m.Unlock()
	c.value += value
}

// Sub decrements the counter by value.
func (c *Counter) Sub(value int) {
	c.m.Lock()
	defer c.m.Unlock()
	c.value -= value
}

// Get returns the current value.
// BUG FIX: the original read c.value without holding the mutex, which is a
// data race against concurrent Set/Add/Sub.
func (c *Counter) Get() int {
	c.m.Lock()
	defer c.m.Unlock()
	return c.value
}
+ +*/ package iam import ( @@ -7,38 +24,22 @@ import ( "net/http" "strings" - v12 "k8s.io/client-go/listers/rbac/v1" - "kubesphere.io/kubesphere/pkg/informers" - "github.com/golang/glog" "k8s.io/api/rbac/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/util/slice" "kubesphere.io/kubesphere/pkg/constants" - ksErr "kubesphere.io/kubesphere/pkg/errors" + kserr "kubesphere.io/kubesphere/pkg/errors" + "kubesphere.io/kubesphere/pkg/models" ) const ClusterRoleKind = "ClusterRole" -var ( - clusterRoleBindingLister v12.ClusterRoleBindingLister - clusterRoleLister v12.ClusterRoleLister - roleBindingLister v12.RoleBindingLister - roleLister v12.RoleLister -) - -func init() { - clusterRoleBindingLister = informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() - clusterRoleLister = informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() - roleBindingLister = informers.SharedInformerFactory().Rbac().V1().RoleBindings().Lister() - roleLister = informers.SharedInformerFactory().Rbac().V1().Roles().Lister() -} - // Get user list based on workspace role -func WorkspaceRoleUsers(workspace string, roleName string) ([]User, error) { +func WorkspaceRoleUsers(workspace string, roleName string) ([]models.User, error) { + + clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() workspaceRoleBinding, err := clusterRoleBindingLister.Get(fmt.Sprintf("system:%s:%s", workspace, roleName)) @@ -67,11 +68,11 @@ func WorkspaceRoleUsers(workspace string, roleName string) ([]User, error) { return users, nil } -func GetUsers(names []string) ([]User, error) { - var users []User +func GetUsers(names []string) ([]models.User, error) { + var users []models.User if names == nil || len(names) == 0 { - return make([]User, 0), nil + return make([]models.User, 0), nil } result, err := 
http.Get(fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/users?name=%s", constants.AccountAPIServer, strings.Join(names, ","))) @@ -88,7 +89,7 @@ func GetUsers(names []string) ([]User, error) { } if result.StatusCode > 200 { - return nil, ksErr.Wrap(data) + return nil, kserr.Parse(data) } err = json.Unmarshal(data, &users) @@ -100,7 +101,7 @@ func GetUsers(names []string) ([]User, error) { return users, nil } -func GetUser(name string) (*User, error) { +func GetUser(name string) (*models.User, error) { result, err := http.Get(fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/users/%s", constants.AccountAPIServer, name)) @@ -116,10 +117,10 @@ func GetUser(name string) (*User, error) { } if result.StatusCode > 200 { - return nil, ksErr.Wrap(data) + return nil, kserr.Parse(data) } - var user User + var user models.User err = json.Unmarshal(data, &user) @@ -187,16 +188,8 @@ func GetUserNamespaces(username string, requiredRule v1.PolicyRule) (allNamespac return false, namespaces, nil } -func GetRole(namespace string, name string) (*v1.Role, error) { - role, err := roleLister.Roles(namespace).Get(name) - if err != nil { - return nil, err - } - - return role.DeepCopy(), nil -} - func GetWorkspaceUsers(workspace string, workspaceRole string) ([]string, error) { + clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() clusterRoleBinding, err := clusterRoleBindingLister.Get(fmt.Sprintf("system:%s:%s", workspace, workspaceRole)) if err != nil { @@ -213,108 +206,6 @@ func GetWorkspaceUsers(workspace string, workspaceRole string) ([]string, error) return users, nil } -func GetClusterRole(name string) (*v1.ClusterRole, error) { - - role, err := clusterRoleLister.Get(name) - - if err != nil { - return nil, err - } - return role.DeepCopy(), nil -} - -func GetRoles(namespace string, username string) ([]v1.Role, error) { - roleBindings, err := roleBindingLister.RoleBindings(namespace).List(labels.Everything()) - - 
if err != nil { - return nil, err - } - - roles := make([]v1.Role, 0) - - for _, roleBinding := range roleBindings { - - for _, subject := range roleBinding.Subjects { - if subject.Kind == v1.UserKind && subject.Name == username { - if roleBinding.RoleRef.Kind == ClusterRoleKind { - clusterRole, err := clusterRoleLister.Get(roleBinding.RoleRef.Name) - if err == nil { - var role = v1.Role{TypeMeta: (*clusterRole).TypeMeta, ObjectMeta: (*clusterRole).ObjectMeta, Rules: (*clusterRole).Rules} - role.Namespace = roleBinding.Namespace - roles = append(roles, role) - break - } else if apierrors.IsNotFound(err) { - glog.Infoln(err.Error()) - break - } else { - return nil, err - } - - } else { - if subject.Kind == v1.UserKind && subject.Name == username { - role, err := roleLister.Roles(roleBinding.Namespace).Get(roleBinding.RoleRef.Name) - if err == nil { - roles = append(roles, *role) - break - } else if apierrors.IsNotFound(err) { - glog.Infoln(err.Error()) - break - } else { - return nil, err - } - - } - - } - } - } - - } - - return roles, nil -} - -// Get cluster roles by username -func GetClusterRoles(username string) ([]v1.ClusterRole, error) { - clusterRoleBindings, err := clusterRoleBindingLister.List(labels.Everything()) - - if err != nil { - return nil, err - } - - roles := make([]v1.ClusterRole, 0) - - for _, roleBinding := range clusterRoleBindings { - for _, subject := range roleBinding.Subjects { - if subject.Kind == v1.UserKind && subject.Name == username { - if roleBinding.RoleRef.Kind == ClusterRoleKind { - role, err := clusterRoleLister.Get(roleBinding.RoleRef.Name) - if err == nil { - role = role.DeepCopy() - if role.Annotations == nil { - role.Annotations = make(map[string]string, 0) - } - role.Annotations["rbac.authorization.k8s.io/clusterrolebinding"] = roleBinding.Name - if roleBinding.Annotations != nil && - roleBinding.Annotations["rbac.authorization.k8s.io/clusterrole"] == roleBinding.RoleRef.Name { - 
role.Annotations["rbac.authorization.k8s.io/clusterrole"] = "true" - } - roles = append(roles, *role) - break - } else if apierrors.IsNotFound(err) { - glog.Warning(err) - break - } else { - return nil, err - } - } - } - } - } - - return roles, nil -} - func RulesMatchesRequired(rules []v1.PolicyRule, required v1.PolicyRule) bool { for _, rule := range rules { if ruleMatchesRequired(rule, required) { @@ -323,139 +214,3 @@ func RulesMatchesRequired(rules []v1.PolicyRule, required v1.PolicyRule) bool { } return false } - -func ruleMatchesRequired(rule v1.PolicyRule, required v1.PolicyRule) bool { - - if len(required.NonResourceURLs) == 0 { - for _, apiGroup := range required.APIGroups { - for _, resource := range required.Resources { - resources := strings.Split(resource, "/") - resource = resources[0] - var subsource string - if len(resources) > 1 { - subsource = resources[1] - } - - if len(required.ResourceNames) == 0 { - for _, verb := range required.Verbs { - if !ruleMatchesRequest(rule, apiGroup, "", resource, subsource, "", verb) { - return false - } - } - } else { - for _, resourceName := range required.ResourceNames { - for _, verb := range required.Verbs { - if !ruleMatchesRequest(rule, apiGroup, "", resource, subsource, resourceName, verb) { - return false - } - } - } - } - } - } - } else { - for _, apiGroup := range required.APIGroups { - for _, nonResourceURL := range required.NonResourceURLs { - for _, verb := range required.Verbs { - if !ruleMatchesRequest(rule, apiGroup, nonResourceURL, "", "", "", verb) { - return false - } - } - } - } - } - return true -} - -func ruleMatchesResources(rule v1.PolicyRule, apiGroup string, resource string, subresource string, resourceName string) bool { - - if resource == "" { - return false - } - - if !hasString(rule.APIGroups, apiGroup) && !hasString(rule.APIGroups, v1.ResourceAll) { - return false - } - - if len(rule.ResourceNames) > 0 && !hasString(rule.ResourceNames, resourceName) { - return false - } - - 
combinedResource := resource - - if subresource != "" { - combinedResource = combinedResource + "/" + subresource - } - - for _, res := range rule.Resources { - - // match "*" - if res == v1.ResourceAll || res == combinedResource { - return true - } - - // match "*/subresource" - if len(subresource) > 0 && strings.HasPrefix(res, "*/") && subresource == strings.TrimLeft(res, "*/") { - return true - } - // match "resource/*" - if strings.HasSuffix(res, "/*") && resource == strings.TrimRight(res, "/*") { - return true - } - } - - return false -} - -func ruleMatchesRequest(rule v1.PolicyRule, apiGroup string, nonResourceURL string, resource string, subresource string, resourceName string, verb string) bool { - - if !hasString(rule.Verbs, verb) && !hasString(rule.Verbs, v1.VerbAll) { - return false - } - - if nonResourceURL == "" { - return ruleMatchesResources(rule, apiGroup, resource, subresource, resourceName) - } else { - return ruleMatchesNonResource(rule, nonResourceURL) - } -} - -func ruleMatchesNonResource(rule v1.PolicyRule, nonResourceURL string) bool { - - if nonResourceURL == "" { - return false - } - - for _, spec := range rule.NonResourceURLs { - if pathMatches(nonResourceURL, spec) { - return true - } - } - - return false -} - -func pathMatches(path, spec string) bool { - // Allow wildcard match - if spec == "*" { - return true - } - // Allow exact match - if spec == path { - return true - } - // Allow a trailing * subpath match - if strings.HasSuffix(spec, "*") && strings.HasPrefix(path, strings.TrimRight(spec, "*")) { - return true - } - return false -} - -func hasString(slice []string, value string) bool { - for _, s := range slice { - if s == value { - return true - } - } - return false -} diff --git a/pkg/models/iam/im.go b/pkg/models/iam/im.go new file mode 100644 index 000000000..bb9e37973 --- /dev/null +++ b/pkg/models/iam/im.go @@ -0,0 +1,1155 @@ +/* + + Copyright 2019 The KubeSphere Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package iam + +import ( + "errors" + "fmt" + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/informers" + "log" + "os" + "regexp" + "strconv" + "strings" + "time" + + "github.com/dgrijalva/jwt-go" + "github.com/go-ldap/ldap" + "github.com/golang/glog" + "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + + "kubesphere.io/kubesphere/pkg/client" + "kubesphere.io/kubesphere/pkg/models" + jwtutils "kubesphere.io/kubesphere/pkg/utils/jwt" +) + +const ( + envAdminEmail = "ADMIN_EMAIL" + envAdminPWD = "ADMIN_PWD" +) + +var ( + counter Counter + AdminEmail = "admin@kubesphere.io" + AdminPWD = "passw0rd" +) + +func init() { + if env := os.Getenv(envAdminEmail); env != "" { + AdminEmail = env + } + if env := os.Getenv(envAdminPWD); env != "" { + AdminPWD = env + } +} + +func DatabaseInit() error { + var conn ldap.Client + var err error + maxRetry := 5 + for retry := 0; retry < maxRetry; retry++ { + conn, err = NewConnection() + if err == nil { + break + } else if retry == maxRetry-1 { + log.Printf("cannot connect to ldap server ,%s", err) + } else { + log.Printf("cannot connect to ldap server ,retry %d/%d\n after 2s", retry+1, maxRetry) + } + time.Sleep(10 * time.Second) + } + + if err != nil { + return err + } + + defer conn.Close() + + if err != nil { + return err + } + + // search for the given username + userSearchRequest := ldap.NewSearchRequest( + 
client.UserSearchBase, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + "(&(objectClass=inetOrgPerson))", + nil, + nil, + ) + + users, err := conn.Search(userSearchRequest) + + if err != nil { + switch err.(type) { + case *ldap.Error: + if err.(*ldap.Error).ResultCode == 32 { + err := createUserBaseDN() + if err != nil { + return fmt.Errorf("UserBaseDN %s create failed: %s\n", client.UserSearchBase, err) + } else { + log.Printf("UserBaseDN %s create success\n", client.UserSearchBase) + } + } else { + return fmt.Errorf("UserBaseDN %s not exist: %s\n", client.UserSearchBase, err) + } + default: + return fmt.Errorf("UserBaseDN %s not exist: %s\n", client.UserSearchBase, err) + } + } + + counter = NewCounter(len(users.Entries)) + + if users == nil || len(users.Entries) == 0 { + err := CreateUser(models.User{Username: constants.AdminUserName, Email: AdminEmail, Password: AdminPWD, Description: "Administrator account that was always created by default."}) + + if err != nil { + return fmt.Errorf("admin create failed: %s\n", err) + } + + log.Println("admin init success") + } + + // search user group + groupSearchRequest := ldap.NewSearchRequest( + client.GroupSearchBase, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + "(&(objectClass=posixGroup))", + nil, + nil, + ) + + groups, err := conn.Search(groupSearchRequest) + + if err != nil { + switch err.(type) { + case *ldap.Error: + if err.(*ldap.Error).ResultCode == 32 { + err := createGroupsBaseDN() + if err != nil { + return fmt.Errorf("GroupBaseDN %s create failed: %s\n", client.GroupSearchBase, err) + } else { + log.Printf("GroupBaseDN %s create success\n", client.GroupSearchBase) + } + } else { + return fmt.Errorf("GroupBaseDN %s not exist: %s\n", client.GroupSearchBase, err) + } + default: + return fmt.Errorf("GroupBaseDN %s not exist: %s\n", client.GroupSearchBase, err) + } + } + + if groups == nil || len(groups.Entries) == 0 { + systemGroup := models.Group{Path: 
constants.SystemWorkspace, Name: constants.SystemWorkspace, Creator: constants.AdminUserName, Description: "system workspace"} + + _, err = CreateGroup(systemGroup) + + if err != nil { + return fmt.Errorf("system-group create failed: %s\n", err) + } + + log.Println("system-workspace init success") + } + + return nil +} + +func createUserBaseDN() error { + + conn, err := NewConnection() + if err != nil { + return err + } + defer conn.Close() + + groupsCreateRequest := ldap.NewAddRequest(client.UserSearchBase, nil) + groupsCreateRequest.Attribute("objectClass", []string{"organizationalUnit", "top"}) + groupsCreateRequest.Attribute("ou", []string{"Users"}) + return conn.Add(groupsCreateRequest) +} + +func createGroupsBaseDN() error { + + conn, err := NewConnection() + + if err != nil { + return err + } + + defer conn.Close() + + groupsCreateRequest := ldap.NewAddRequest(client.GroupSearchBase, nil) + groupsCreateRequest.Attribute("objectClass", []string{"organizationalUnit", "top"}) + groupsCreateRequest.Attribute("ou", []string{"Groups"}) + return conn.Add(groupsCreateRequest) +} + +func NewConnection() (ldap.Client, error) { + conn, err := client.LdapClient().Get() + if err != nil { + return nil, err + } + err = conn.Bind(client.ManagerDN, client.ManagerPassword) + if err != nil { + return nil, err + } + return conn, nil +} + +// User login +func Login(username string, password string, ip string) (string, error) { + + conn, err := NewConnection() + + if err != nil { + return "", err + } + + defer conn.Close() + + userSearchRequest := ldap.NewSearchRequest( + client.UserSearchBase, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + fmt.Sprintf("(&(objectClass=inetOrgPerson)(|(uid=%s)(mail=%s)))", username, username), + []string{"uid", "mail"}, + nil, + ) + + result, err := conn.Search(userSearchRequest) + + if err != nil { + return "", err + } + + if len(result.Entries) != 1 { + return "", ldap.NewError(ldap.LDAPResultInvalidCredentials, 
errors.New("incorrect password")) + } + + uid := result.Entries[0].GetAttributeValue("uid") + email := result.Entries[0].GetAttributeValue("mail") + dn := result.Entries[0].DN + + user := models.User{Username: uid, Email: email} + + // bind as the user to verify their password + err = conn.Bind(dn, password) + + if err != nil { + return "", err + } + + if ip != "" { + redisClient := client.RedisClient() + redisClient.RPush(fmt.Sprintf("kubesphere:users:%s:login-log", uid), fmt.Sprintf("%s,%s", time.Now().UTC().Format("2006-01-02T15:04:05Z"), ip)) + redisClient.LTrim(fmt.Sprintf("kubesphere:users:%s:login-log", uid), -10, -1) + } + + claims := jwt.MapClaims{} + + claims["exp"] = time.Now().Add(time.Hour * 24).Unix() + claims["username"] = user.Username + claims["email"] = user.Email + + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + + uToken, _ := token.SignedString(jwtutils.Secret) + + return uToken, nil +} + +func UserList(limit int, offset int) (int, []models.User, error) { + + conn, err := NewConnection() + + if err != nil { + return 0, nil, err + } + + defer conn.Close() + + users := make([]models.User, 0) + + pageControl := ldap.NewControlPaging(1000) + + entries := make([]*ldap.Entry, 0) + + cursor := 0 +l1: + for { + + userSearchRequest := ldap.NewSearchRequest( + client.UserSearchBase, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + "(&(objectClass=inetOrgPerson))", + []string{"uid", "mail", "description"}, + []ldap.Control{pageControl}, + ) + + response, err := conn.Search(userSearchRequest) + + if err != nil { + return 0, nil, err + } + + for _, entry := range response.Entries { + cursor++ + if cursor > offset { + if len(entries) < limit { + entries = append(entries, entry) + } else { + break l1 + } + } + } + + updatedControl := ldap.FindControl(response.Controls, ldap.ControlTypePaging) + if ctrl, ok := updatedControl.(*ldap.ControlPaging); ctrl != nil && ok && len(ctrl.Cookie) != 0 { + pageControl.SetCookie(ctrl.Cookie) + 
continue + } + + break + } + + redisClient := client.RedisClient() + + for _, v := range entries { + + uid := v.GetAttributeValue("uid") + email := v.GetAttributeValue("mail") + description := v.GetAttributeValue("description") + user := models.User{Username: uid, Email: email, Description: description} + + avatar, err := redisClient.HMGet("kubesphere:users:avatar", uid).Result() + + if err != nil { + return 0, nil, err + } + + if len(avatar) > 0 { + if url, ok := avatar[0].(string); ok { + user.AvatarUrl = url + } + } + + lastLogin, err := redisClient.LRange(fmt.Sprintf("kubesphere:users:%s:login-log", uid), -1, -1).Result() + + if err != nil { + return 0, nil, err + } + + if len(lastLogin) > 0 { + user.LastLoginTime = strings.Split(lastLogin[0], ",")[0] + } + + user.ClusterRules = make([]models.SimpleRule, 0) + + users = append(users, user) + } + + return counter.Get(), users, nil +} + +func LoginLog(username string) ([]string, error) { + redisClient := client.RedisClient() + + data, err := redisClient.LRange(fmt.Sprintf("kubesphere:users:%s:login-log", username), -10, -1).Result() + + if err != nil { + return nil, err + } + + return data, nil +} + +func Search(keyword string, limit int, offset int) (int, []models.User, error) { + + conn, err := NewConnection() + + if err != nil { + return 0, nil, err + } + + defer conn.Close() + + users := make([]models.User, 0) + + pageControl := ldap.NewControlPaging(80) + + entries := make([]*ldap.Entry, 0) + + cursor := 0 +l1: + for { + userSearchRequest := ldap.NewSearchRequest( + client.UserSearchBase, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + fmt.Sprintf("(&(objectClass=inetOrgPerson)(|(uid=*%s*)(mail=*%s*)(description=*%s*)))", keyword, keyword, keyword), + []string{"uid", "mail", "description"}, + []ldap.Control{pageControl}, + ) + + response, err := conn.Search(userSearchRequest) + + if err != nil { + return 0, nil, err + } + + for _, entry := range response.Entries { + cursor++ + if cursor > 
offset { + if len(entries) < limit { + entries = append(entries, entry) + } else { + break l1 + } + } + } + + updatedControl := ldap.FindControl(response.Controls, ldap.ControlTypePaging) + if ctrl, ok := updatedControl.(*ldap.ControlPaging); ctrl != nil && ok && len(ctrl.Cookie) != 0 { + pageControl.SetCookie(ctrl.Cookie) + continue + } + + break + } + + redisClient := client.RedisClient() + + for _, v := range entries { + + uid := v.GetAttributeValue("uid") + email := v.GetAttributeValue("mail") + description := v.GetAttributeValue("description") + user := models.User{Username: uid, Email: email, Description: description} + + avatar, err := redisClient.HMGet("kubesphere:users:avatar", uid).Result() + + if err != nil { + return 0, nil, err + } + + if len(avatar) > 0 { + if url, ok := avatar[0].(string); ok { + user.AvatarUrl = url + } + } + + lastLogin, err := redisClient.LRange(fmt.Sprintf("kubesphere:users:%s:login-log", uid), -1, -1).Result() + + if err != nil { + return 0, nil, err + } + + if len(lastLogin) > 0 { + user.LastLoginTime = strings.Split(lastLogin[0], ",")[0] + } + + user.ClusterRules = make([]models.SimpleRule, 0) + + users = append(users, user) + } + + return counter.Get(), users, nil +} + +func UserDetail(username string, conn ldap.Client) (*models.User, error) { + + userSearchRequest := ldap.NewSearchRequest( + client.UserSearchBase, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + fmt.Sprintf("(&(objectClass=inetOrgPerson)(uid=%s))", username), + []string{"mail", "description", "preferredLanguage"}, + nil, + ) + + result, err := conn.Search(userSearchRequest) + + if err != nil { + return nil, err + } + + if len(result.Entries) != 1 { + return nil, ldap.NewError(ldap.LDAPResultNoSuchObject, fmt.Errorf("user %s does not exist", username)) + } + + email := result.Entries[0].GetAttributeValue("mail") + description := result.Entries[0].GetAttributeValue("description") + lang := 
result.Entries[0].GetAttributeValue("preferredLanguage") + user := models.User{Username: username, Email: email, Description: description, Lang: lang} + + groupSearchRequest := ldap.NewSearchRequest( + client.GroupSearchBase, + ldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false, + fmt.Sprintf("(&(objectClass=posixGroup)(memberUid=%s))", username), + nil, + nil, + ) + + result, err = conn.Search(groupSearchRequest) + + if err != nil { + return nil, err + + } + + groups := make([]string, 0) + + for _, group := range result.Entries { + groupName := convertDNToPath(group.DN) + groups = append(groups, groupName) + } + + user.Groups = groups + + redisClient := client.RedisClient() + + avatar, err := redisClient.HMGet("kubesphere:users:avatar", username).Result() + + if err != nil { + return nil, err + } + + if len(avatar) > 0 { + if url, ok := avatar[0].(string); ok { + user.AvatarUrl = url + } + } + + user.Status = 0 + + lastLogin, err := redisClient.LRange(fmt.Sprintf("kubesphere:users:%s:login-log", username), -1, -1).Result() + + if err != nil { + return nil, err + } + + if len(lastLogin) > 0 { + user.LastLoginTime = strings.Split(lastLogin[0], ",")[0] + } + + return &user, nil +} + +func DeleteUser(username string) error { + + // bind root DN + conn, err := NewConnection() + if err != nil { + return err + } + + defer conn.Close() + + deleteRequest := ldap.NewDelRequest(fmt.Sprintf("uid=%s,%s", username, client.UserSearchBase), nil) + + err = conn.Del(deleteRequest) + + if err != nil { + return err + } + + err = deleteRoleBindings(username) + + if err != nil { + return err + } + + counter.Sub(1) + + return nil +} + +func deleteRoleBindings(username string) error { + roleBindingLister := informers.SharedInformerFactory().Rbac().V1().RoleBindings().Lister() + roleBindings, err := roleBindingLister.List(labels.Everything()) + + if err != nil { + return err + } + + for _, roleBinding := range roleBindings { + + length1 := len(roleBinding.Subjects) + + for index, 
subject := range roleBinding.Subjects { + if subject.Kind == v1.UserKind && subject.Name == username { + roleBinding.Subjects = append(roleBinding.Subjects[:index], roleBinding.Subjects[index+1:]...) + index-- + } + } + + length2 := len(roleBinding.Subjects) + + if length2 == 0 { + deletePolicy := meta_v1.DeletePropagationForeground + err = client.K8sClient().RbacV1().RoleBindings(roleBinding.Namespace).Delete(roleBinding.Name, &meta_v1.DeleteOptions{PropagationPolicy: &deletePolicy}) + + if err != nil { + glog.Errorf("delete role binding %s %s %s failed: %v", username, roleBinding.Namespace, roleBinding.Name, err) + } + } else if length2 < length1 { + _, err = client.K8sClient().RbacV1().RoleBindings(roleBinding.Namespace).Update(roleBinding) + + if err != nil { + glog.Errorf("update role binding %s %s %s failed: %v", username, roleBinding.Namespace, roleBinding.Name, err) + } + } + } + + clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() + clusterRoleBindings, err := clusterRoleBindingLister.List(labels.Everything()) + + for _, clusterRoleBinding := range clusterRoleBindings { + length1 := len(clusterRoleBinding.Subjects) + + for index, subject := range clusterRoleBinding.Subjects { + if subject.Kind == v1.UserKind && subject.Name == username { + clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects[:index], clusterRoleBinding.Subjects[index+1:]...) 
+ index-- + } + } + + length2 := len(clusterRoleBinding.Subjects) + if length2 == 0 { + if groups := regexp.MustCompile(fmt.Sprintf(`^system:(\S+):(%s)$`, strings.Join(constants.WorkSpaceRoles, "|"))).FindStringSubmatch(clusterRoleBinding.RoleRef.Name); len(groups) == 3 { + _, err = client.K8sClient().RbacV1().ClusterRoleBindings().Update(clusterRoleBinding) + } else { + deletePolicy := meta_v1.DeletePropagationForeground + err = client.K8sClient().RbacV1().ClusterRoleBindings().Delete(clusterRoleBinding.Name, &meta_v1.DeleteOptions{PropagationPolicy: &deletePolicy}) + } + if err != nil { + glog.Errorf("update cluster role binding %s failed:%s", clusterRoleBinding.Name, err) + } + } else if length2 < length1 { + _, err = client.K8sClient().RbacV1().ClusterRoleBindings().Update(clusterRoleBinding) + + if err != nil { + glog.Errorf("update cluster role binding %s failed:%s", clusterRoleBinding.Name, err) + } + } + + } + + return nil +} + +func UserCreateCheck(check string) (exist bool, err error) { + + // bind root DN + conn, err := NewConnection() + + if err != nil { + return false, err + } + + defer conn.Close() + + // search for the given username + userSearchRequest := ldap.NewSearchRequest( + client.UserSearchBase, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + fmt.Sprintf("(&(objectClass=inetOrgPerson)(|(uid=%s)(mail=%s)))", check, check), + []string{"uid", "mail"}, + nil, + ) + + result, err := conn.Search(userSearchRequest) + + if err != nil { + return false, err + } + + if len(result.Entries) > 0 { + return true, nil + } else { + return false, nil + } +} + +func CreateUser(user models.User) error { + user.Username = strings.TrimSpace(user.Username) + user.Email = strings.TrimSpace(user.Email) + user.Password = strings.TrimSpace(user.Password) + user.Description = strings.TrimSpace(user.Description) + + conn, err := NewConnection() + + if err != nil { + return err + } + + defer conn.Close() + + userSearchRequest := ldap.NewSearchRequest( + 
client.UserSearchBase, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + fmt.Sprintf("(&(objectClass=inetOrgPerson)(|(uid=%s)(mail=%s)))", user.Username, user.Email), + []string{"uid", "mail"}, + nil, + ) + + result, err := conn.Search(userSearchRequest) + + if err != nil { + return err + } + + if len(result.Entries) > 0 { + return errors.New("username or email already exists") + } + + maxUid, err := getMaxUid(conn) + + if err != nil { + return err + } + + maxUid += 1 + + userCreateRequest := ldap.NewAddRequest(fmt.Sprintf("uid=%s,%s", user.Username, client.UserSearchBase), nil) + userCreateRequest.Attribute("objectClass", []string{"inetOrgPerson", "posixAccount", "top"}) + userCreateRequest.Attribute("cn", []string{user.Username}) // RFC4519: common name(s) for which the entity is known by + userCreateRequest.Attribute("sn", []string{" "}) // RFC2256: last (family) name(s) for which the entity is known by + userCreateRequest.Attribute("gidNumber", []string{"500"}) // RFC2307: An integer uniquely identifying a group in an administrative domain + userCreateRequest.Attribute("homeDirectory", []string{"/home/" + user.Username}) // The absolute path to the home directory + userCreateRequest.Attribute("uid", []string{user.Username}) // RFC4519: user identifier + userCreateRequest.Attribute("uidNumber", []string{strconv.Itoa(maxUid)}) // RFC2307: An integer uniquely identifying a user in an administrative domain + userCreateRequest.Attribute("mail", []string{user.Email}) // RFC1274: RFC822 Mailbox + userCreateRequest.Attribute("userPassword", []string{user.Password}) // RFC4519/2307: password of user + if user.Lang != "" { + userCreateRequest.Attribute("preferredLanguage", []string{user.Lang}) // RFC4519/2307: password of user + } + if user.Description != "" { + userCreateRequest.Attribute("description", []string{user.Description}) // RFC4519: descriptive information + } + + err = conn.Add(userCreateRequest) + + if err != nil { + return err + } + + 
counter.Add(1) + + if user.ClusterRole != "" { + CreateClusterRoleBinding(user.Username, user.ClusterRole) + } + + return nil +} + +func getMaxUid(conn ldap.Client) (int, error) { + userSearchRequest := ldap.NewSearchRequest(client.UserSearchBase, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + "(&(objectClass=inetOrgPerson))", + []string{"uidNumber"}, + nil) + + result, err := conn.Search(userSearchRequest) + + if err != nil { + return 0, err + } + + var maxUid int + + if len(result.Entries) == 0 { + maxUid = 1000 + } else { + for _, usr := range result.Entries { + uid, _ := strconv.Atoi(usr.GetAttributeValue("uidNumber")) + if uid > maxUid { + maxUid = uid + } + } + } + + return maxUid, nil +} + +func getMaxGid(conn ldap.Client) (int, error) { + + groupSearchRequest := ldap.NewSearchRequest(client.GroupSearchBase, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + "(&(objectClass=posixGroup))", + []string{"gidNumber"}, + nil) + + result, err := conn.Search(groupSearchRequest) + + if err != nil { + return 0, err + } + + var maxGid int + + if len(result.Entries) == 0 { + maxGid = 500 + } else { + for _, group := range result.Entries { + gid, _ := strconv.Atoi(group.GetAttributeValue("gidNumber")) + if gid > maxGid { + maxGid = gid + } + } + } + + return maxGid, nil +} + +func UpdateUser(user models.User) error { + + conn, err := NewConnection() + if err != nil { + return err + } + + defer conn.Close() + + dn := fmt.Sprintf("uid=%s,%s", user.Username, client.UserSearchBase) + userModifyRequest := ldap.NewModifyRequest(dn, nil) + if user.Email != "" { + userModifyRequest.Replace("mail", []string{user.Email}) + } + if user.Description != "" { + userModifyRequest.Replace("description", []string{user.Description}) + } + + if user.Lang != "" { + userModifyRequest.Replace("preferredLanguage", []string{user.Lang}) + } + + if user.Password != "" { + userModifyRequest.Replace("userPassword", []string{user.Password}) + } + + err = 
conn.Modify(userModifyRequest) + + if err != nil { + return err + } + + err = CreateClusterRoleBinding(user.Username, user.ClusterRole) + + if err != nil { + return err + } + + return nil +} +func DeleteGroup(path string) error { + + // bind root DN + conn, err := NewConnection() + if err != nil { + return err + } + defer conn.Close() + + searchBase, cn := splitPath(path) + + groupDeleteRequest := ldap.NewDelRequest(fmt.Sprintf("cn=%s,%s", cn, searchBase), nil) + err = conn.Del(groupDeleteRequest) + + if err != nil { + return err + } + + return nil +} + +func CreateGroup(group models.Group) (*models.Group, error) { + + // bind root DN + conn, err := NewConnection() + if err != nil { + return nil, err + } + defer conn.Close() + + maxGid, err := getMaxGid(conn) + + if err != nil { + return nil, err + } + + maxGid += 1 + + if group.Path == "" { + group.Path = group.Name + } + + searchBase, cn := splitPath(group.Path) + + groupCreateRequest := ldap.NewAddRequest(fmt.Sprintf("cn=%s,%s", cn, searchBase), nil) + groupCreateRequest.Attribute("objectClass", []string{"posixGroup", "top"}) + groupCreateRequest.Attribute("cn", []string{cn}) + groupCreateRequest.Attribute("gidNumber", []string{strconv.Itoa(maxGid)}) + + if group.Description != "" { + groupCreateRequest.Attribute("description", []string{group.Description}) + } + + groupCreateRequest.Attribute("memberUid", []string{group.Creator}) + + err = conn.Add(groupCreateRequest) + + if err != nil { + return nil, err + } + + group.Gid = strconv.Itoa(maxGid) + + group.CreateTime = time.Now().UTC().Format("2006-01-02T15:04:05Z") + + redisClient := client.RedisClient() + + if err := redisClient.HMSet("kubesphere:groups:create-time", map[string]interface{}{group.Name: group.CreateTime}).Err(); err != nil { + return nil, err + } + if err := redisClient.HMSet("kubesphere:groups:creator", map[string]interface{}{group.Name: group.Creator}).Err(); err != nil { + return nil, err + } + + return &group, nil +} + +func UpdateGroup(group 
*models.Group) (*models.Group, error) { + + // bind root DN + conn, err := NewConnection() + if err != nil { + return nil, err + } + defer conn.Close() + + old, err := GroupDetail(group.Path, conn) + + if err != nil { + return nil, err + } + + searchBase, cn := splitPath(group.Path) + + groupUpdateRequest := ldap.NewModifyRequest(fmt.Sprintf("cn=%s,%s", cn, searchBase), nil) + + if old.Description == "" { + if group.Description != "" { + groupUpdateRequest.Add("description", []string{group.Description}) + } + } else { + if group.Description != "" { + groupUpdateRequest.Replace("description", []string{group.Description}) + } else { + groupUpdateRequest.Delete("description", []string{}) + } + } + + if group.Members != nil { + groupUpdateRequest.Replace("memberUid", group.Members) + } + + err = conn.Modify(groupUpdateRequest) + + if err != nil { + return nil, err + } + + return group, nil +} + +func CountChild(path string) (int, error) { + // bind root DN + conn, err := NewConnection() + if err != nil { + return 0, err + } + defer conn.Close() + + var groupSearchRequest *ldap.SearchRequest + if path == "" { + groupSearchRequest = ldap.NewSearchRequest(client.GroupSearchBase, + ldap.ScopeSingleLevel, ldap.NeverDerefAliases, 0, 0, false, + "(&(objectClass=posixGroup))", + []string{"cn", "gidNumber", "memberUid", "description"}, + nil) + } else { + searchBase, cn := splitPath(path) + groupSearchRequest = ldap.NewSearchRequest(fmt.Sprintf("cn=%s,%s", cn, searchBase), + ldap.ScopeSingleLevel, ldap.NeverDerefAliases, 0, 0, false, + "(&(objectClass=posixGroup))", + []string{"cn", "gidNumber", "memberUid", "description"}, + nil) + } + + result, err := conn.Search(groupSearchRequest) + + if err != nil { + return 0, err + } + + return len(result.Entries), nil +} + +func ChildList(path string) ([]models.Group, error) { + + // bind root DN + conn, err := NewConnection() + + if err != nil { + return nil, err + } + + defer conn.Close() + + var groupSearchRequest *ldap.SearchRequest 
+ if path == "" { + groupSearchRequest = ldap.NewSearchRequest(client.GroupSearchBase, + ldap.ScopeSingleLevel, ldap.NeverDerefAliases, 0, 0, false, + "(&(objectClass=posixGroup))", + []string{"cn", "gidNumber", "memberUid", "description"}, + nil) + } else { + searchBase, cn := splitPath(path) + groupSearchRequest = ldap.NewSearchRequest(fmt.Sprintf("cn=%s,%s", cn, searchBase), + ldap.ScopeSingleLevel, ldap.NeverDerefAliases, 0, 0, false, + "(&(objectClass=posixGroup))", + []string{"cn", "gidNumber", "memberUid", "description"}, + nil) + } + + result, err := conn.Search(groupSearchRequest) + + if err != nil { + return nil, err + } + + groups := make([]models.Group, 0) + + for _, v := range result.Entries { + dn := v.DN + cn := v.GetAttributeValue("cn") + gid := v.GetAttributeValue("gidNumber") + members := v.GetAttributeValues("memberUid") + description := v.GetAttributeValue("description") + + group := models.Group{Path: convertDNToPath(dn), Name: cn, Gid: gid, Members: members, Description: description} + + childSearchRequest := ldap.NewSearchRequest(dn, + ldap.ScopeSingleLevel, ldap.NeverDerefAliases, 0, 0, false, + "(&(objectClass=posixGroup))", + []string{""}, + nil) + + result, err = conn.Search(childSearchRequest) + + if err != nil { + return nil, err + } + + childGroups := make([]string, 0) + + for _, v := range result.Entries { + child := convertDNToPath(v.DN) + childGroups = append(childGroups, child) + } + + group.ChildGroups = childGroups + + redisClient := client.RedisClient() + + createTime, _ := redisClient.HMGet("kubesphere:groups:create-time", group.Name).Result() + + if len(createTime) > 0 { + if t, ok := createTime[0].(string); ok { + group.CreateTime = t + } + } + + creator, _ := redisClient.HMGet("kubesphere:groups:creator", group.Name).Result() + + if len(creator) > 0 { + if t, ok := creator[0].(string); ok { + group.Creator = t + } + } + + groups = append(groups, group) + } + + return groups, nil +} + +func GroupDetail(path string, conn 
ldap.Client) (*models.Group, error) { + + searchBase, cn := splitPath(path) + + groupSearchRequest := ldap.NewSearchRequest(searchBase, + ldap.ScopeSingleLevel, ldap.NeverDerefAliases, 0, 0, false, + fmt.Sprintf("(&(objectClass=posixGroup)(cn=%s))", cn), + []string{"cn", "gidNumber", "memberUid", "description"}, + nil) + + result, err := conn.Search(groupSearchRequest) + + if err != nil { + return nil, err + } + + if len(result.Entries) != 1 { + return nil, ldap.NewError(ldap.LDAPResultNoSuchObject, fmt.Errorf("group %s does not exist", path)) + } + + dn := result.Entries[0].DN + cn = result.Entries[0].GetAttributeValue("cn") + gid := result.Entries[0].GetAttributeValue("gidNumber") + members := result.Entries[0].GetAttributeValues("memberUid") + description := result.Entries[0].GetAttributeValue("description") + + group := models.Group{Path: convertDNToPath(dn), Name: cn, Gid: gid, Members: members, Description: description} + + childGroups := make([]string, 0) + + group.ChildGroups = childGroups + + redisClient := client.RedisClient() + + createTime, _ := redisClient.HMGet("kubesphere:groups:create-time", group.Name).Result() + + if len(createTime) > 0 { + if t, ok := createTime[0].(string); ok { + group.CreateTime = t + } + } + + creator, _ := redisClient.HMGet("kubesphere:groups:creator", group.Name).Result() + + if len(creator) > 0 { + if t, ok := creator[0].(string); ok { + group.Creator = t + } + } + + return &group, nil + +} diff --git a/pkg/models/iam/path.go b/pkg/models/iam/path.go new file mode 100644 index 000000000..6b580b87b --- /dev/null +++ b/pkg/models/iam/path.go @@ -0,0 +1,76 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package iam + +import ( + "fmt" + "kubesphere.io/kubesphere/pkg/client" + "regexp" + "strings" +) + +func convertDNToPath(dn string) string { + + paths := regexp.MustCompile("cn=[a-z0-9]([-a-z0-9]*[a-z0-9])?").FindAllString(dn, -1) + + if len(paths) > 1 { + for i := 0; i < len(paths); i++ { + paths[i] = strings.Replace(paths[i], "cn=", "", 1) + } + for i, j := 0, len(paths)-1; i < j; i, j = i+1, j-1 { + paths[i], paths[j] = paths[j], paths[i] + } + return strings.Join(paths, ":") + } else if len(paths) == 1 { + return strings.Replace(paths[0], "cn=", "", -1) + } else { + return "" + } +} + +func splitPath(path string) (searchBase string, cn string) { + + paths := strings.Split(path, ":") + length := len(paths) + if length > 2 { + + cn = paths[length-1] + basePath := paths[:length-1] + + for i := 0; i < len(basePath); i++ { + basePath[i] = fmt.Sprintf("cn=%s", basePath[i]) + } + + for i, j := 0, length-2; i < j; i, j = i+1, j-1 { + basePath[i], basePath[j] = basePath[j], basePath[i] + } + + searchBase = fmt.Sprintf("%s,%s", strings.Join(basePath, ","), client.GroupSearchBase) + } else if length == 2 { + searchBase = fmt.Sprintf("cn=%s,%s", paths[0], client.GroupSearchBase) + cn = paths[1] + } else { + searchBase = client.GroupSearchBase + if paths[0] == "" { + cn = "*" + } else { + cn = paths[0] + } + } + return +} diff --git a/pkg/models/iam/policy/policy.go b/pkg/models/iam/policy/policy.go new file mode 100644 index 000000000..89f98d528 --- /dev/null +++ b/pkg/models/iam/policy/policy.go @@ -0,0 +1,1333 @@ +/* + + Copyright 2019 The KubeSphere Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package policy + +import ( + "encoding/json" + "io/ioutil" + + "kubesphere.io/kubesphere/pkg/models" + + "k8s.io/api/rbac/v1" +) + +const ( + rulesConfigPath = "/etc/kubesphere/rules/rules.json" + clusterRulesConfigPath = "/etc/kubesphere/rules/clusterrules.json" +) + +func init() { + rulesConfig, err := ioutil.ReadFile(rulesConfigPath) + + if err == nil { + config := &[]models.Rule{} + json.Unmarshal(rulesConfig, config) + if len(*config) > 0 { + RoleRuleMapping = *config + } + } + + clusterRulesConfig, err := ioutil.ReadFile(clusterRulesConfigPath) + + if err == nil { + config := &[]models.Rule{} + json.Unmarshal(clusterRulesConfig, config) + if len(*config) > 0 { + ClusterRoleRuleMapping = *config + } + } +} + +var ( + WorkspaceRoleRuleMapping = []models.Rule{ + { + Name: "workspaces", + Actions: []models.Action{ + + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"*"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces"}, + }, { + Verbs: []string{"*"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/*"}, + }, + { + Verbs: []string{"*"}, + APIGroups: []string{"jenkins.kubesphere.io"}, + Resources: []string{"*"}, + }, { + Verbs: []string{"*"}, + APIGroups: []string{"devops.kubesphere.io"}, + Resources: []string{"*"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"kubesphere.io"}, + Resources: 
[]string{"workspaces"}, + }, + }, + }, + }, + }, + + {Name: "members", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/members"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/members"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"patch", "update"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/members"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/members"}, + }, + }, + }, + }, + }, + { + Name: "devops", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/devops"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/devops"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/devops"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/devops"}, + }, + }, + }, + }, + }, + { + Name: "projects", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/namespaces"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/namespaces"}, + }, + }, + 
}, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/namespaces"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/namespaces"}, + }, + }, + }, + }, + }, + { + Name: "organizations", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"workspaces/organizations"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"workspaces/organizations"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"workspaces/organizations"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"workspaces/organizations"}, + }, + }, + }}, + }, + { + Name: "roles", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces/roles"}, + }, + }}, + }, + }, + } + + ClusterRoleRuleMapping = []models.Rule{ + {Name: "workspaces", + Actions: []models.Action{ + { + Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"users"}, + }, + { + Verbs: []string{"get"}, + APIGroups: []string{"kubesphere.io"}, + ResourceNames: []string{"workspaces"}, + Resources: []string{"monitoring/*"}, + }, + { + Verbs: []string{"list"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"quota", "status", "monitoring", 
"persistentvolumeclaims"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"resources"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces", "workspaces/*"}, + }, + { + Verbs: []string{"get"}, + APIGroups: []string{""}, + Resources: []string{"namespaces"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"", "apps", "extensions", "batch"}, + Resources: []string{"serviceaccounts", "limitranges", "deployments", "configmaps", "secrets", "jobs", "cronjobs", "persistentvolumeclaims", "statefulsets", "daemonsets", "ingresses", "services", "pods/*", "pods", "events", "deployments/scale"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"rolebindings", "roles"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"members"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"router"}, + }, + { + Verbs: []string{"*"}, + APIGroups: []string{"jenkins.kubesphere.io", "devops.kubesphere.io"}, + Resources: []string{"*"}, + }, + }, + }, + { + Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"*"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"workspaces", "workspaces/*"}, + }, + { + Verbs: []string{"*"}, + APIGroups: []string{""}, + Resources: []string{"namespaces"}, + }, + { + Verbs: []string{"*"}, + APIGroups: []string{"", "apps", "extensions", "batch"}, + Resources: []string{"serviceaccounts", "limitranges", "deployments", "configmaps", "secrets", "jobs", "cronjobs", "persistentvolumeclaims", "statefulsets", "daemonsets", "ingresses", "services", "pods/*", 
"pods", "events", "deployments/scale"}, + }, + { + Verbs: []string{"*"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"rolebindings", "roles"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"members"}, + }, + { + Verbs: []string{"*"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"router"}, + }, + { + Verbs: []string{"*"}, + APIGroups: []string{"jenkins.kubesphere.io", "devops.kubesphere.io"}, + Resources: []string{"*"}, + }, + }, + }, + }, + }, + { + Name: "monitoring", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{{ + Verbs: []string{"*"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"monitoring", "health", "monitoring/*"}, + }}, + }, + }, + }, + { + Name: "accounts", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "watch", "list"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"users", "users/*"}, + }, + { + Verbs: []string{"get"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"clusterrules"}, + ResourceNames: []string{"mapping"}, + }, + { + Verbs: []string{"get", "watch", "list"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"clusterrolebindings"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create", "get", "list"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"users"}, + }, + { + Verbs: []string{"get"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"clusterrules"}, + ResourceNames: []string{"mapping"}, + }, + { + Verbs: []string{"create", "delete", "deletecollection"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"clusterrolebindings"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list", "update", "patch"}, + APIGroups: 
[]string{"account.kubesphere.io"}, + Resources: []string{"users"}, + }, + { + Verbs: []string{"create", "delete", "deletecollection"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"clusterrolebindings"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete", "deletecollection"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"accounts"}, + }, + }, + }, + }, + }, { + Name: "roles", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "watch", "list"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"clusterroles"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"kubesphere.io"}, + ResourceNames: []string{"cluster-roles"}, + Resources: []string{"resources"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"clusterroles/*"}, + }, + }, + }, + + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"clusterroles"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"clusterroles"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete", "deletecollection"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"clusterroles"}, + }, + }, + }, + }, + }, { + Name: "storageclasses", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "watch", "list"}, + APIGroups: []string{"storage.k8s.io"}, + Resources: []string{"storageclasses"}, + }, { + Verbs: []string{"get", "list"}, + APIGroups: []string{"kubesphere.io"}, + ResourceNames: []string{"storage-classes"}, + Resources: []string{"resources"}, + }, + { + Verbs: []string{"get", 
"list"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"storage/*"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"storage.k8s.io"}, + Resources: []string{"storageclasses"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"storage.k8s.io"}, + Resources: []string{"storageclasses"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete", "deletecollection"}, + APIGroups: []string{"storage.k8s.io"}, + Resources: []string{"storageclasses"}, + }, + }, + }, + }, + }, { + Name: "nodes", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "watch", "list"}, + APIGroups: []string{""}, + Resources: []string{"nodes", "events"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"kubesphere.io"}, + ResourceNames: []string{"nodes"}, + Resources: []string{"resources", "monitoring", "monitoring/*"}, + }, { + Verbs: []string{"get", "list"}, + APIGroups: []string{"kubesphere.io"}, + ResourceNames: []string{"pods"}, + Resources: []string{"resources"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{""}, + Resources: []string{"nodes"}, + }, + }, + }, + }, + }, { + Name: "repos", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "watch", "list"}, + APIGroups: []string{"openpitrix.io"}, + Resources: []string{"repos"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"openpitrix.io"}, + Resources: []string{"repos"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"openpitrix.io"}, + Resources: []string{"repos"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { 
+ Verbs: []string{"delete", "deletecollection"}, + APIGroups: []string{"openpitrix.io"}, + Resources: []string{"repos"}, + }, + }, + }, + }, + }, { + Name: "apps", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"list"}, + APIGroups: []string{"openpitrix.io"}, + Resources: []string{"repos", "app_versions"}, + }, { + Verbs: []string{"get"}, + APIGroups: []string{"openpitrix.io"}, + Resources: []string{"app_version/*"}, + }, + { + Verbs: []string{"*"}, + APIGroups: []string{"openpitrix.io"}, + Resources: []string{"apps", "clusters"}, + }, + }, + }, + }, + }, { + Name: "components", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"list", "get"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"components", "components/*"}, + }, + { + Verbs: []string{"list", "get"}, + APIGroups: []string{""}, + Resources: []string{"pods"}, + }, + }, + }, + }, + }} + + RoleRuleMapping = []models.Rule{{ + Name: "projects", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{""}, + Resources: []string{"namespaces"}, + }, + { + Verbs: []string{"list"}, + APIGroups: []string{""}, + Resources: []string{"events"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{""}, + Resources: []string{"namespaces"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{""}, + Resources: []string{"namespaces"}, + }, + }, + }, + }, + }, + { + Name: "members", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"rolebindings"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"account.kubesphere.io"}, + Resources: []string{"users"}, + }, + }, + }, + {Name: 
"create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"rolebindings"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "watch", "list", "create", "update", "patch"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"rolebindings"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"rolebindings"}, + }, + }, + }, + }, + }, + { + Name: "roles", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"roles"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"kubesphere.io"}, + ResourceNames: []string{"roles"}, + Resources: []string{"resources"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"roles"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"patch", "update"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"roles"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"rbac.authorization.k8s.io"}, + Resources: []string{"roles"}, + }, + }, + }, + }, + }, + { + Name: "deployments", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"apps", "extensions"}, + Resources: []string{"deployments", "deployments/scale"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{""}, + Resources: []string{"pods", "pods/*"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"apps", 
"extensions"}, + Resources: []string{"deployments"}, + }, + }, + }, + + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"apps", "extensions"}, + Resources: []string{"deployments", "deployments/*"}, + }, + }, + }, + + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"apps", "extensions"}, + Resources: []string{"deployments"}, + }, + }, + }, + {Name: "scale", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"apps", "extensions"}, + Resources: []string{"deployments/scale"}, + }, + }, + }, + }, + }, { + Name: "statefulsets", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"apps"}, + Resources: []string{"statefulsets"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{""}, + Resources: []string{"pods", "pods/*"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"apps"}, + Resources: []string{"statefulsets"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"apps"}, + Resources: []string{"statefulsets"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"apps"}, + Resources: []string{"statefulsets"}, + }, + }, + }, + {Name: "scale", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"patch"}, + APIGroups: []string{"apps"}, + Resources: []string{"statefulsets"}, + }, + }, + }, + }, + }, { + Name: "daemonsets", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"apps", "extensions"}, + Resources: []string{"daemonsets"}, + }, + { + Verbs: []string{"get", "list"}, + APIGroups: []string{""}, + Resources: []string{"pods", "pods/*"}, + }, + }, + }, + {Name: 
"create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"apps", "extensions"}, + Resources: []string{"daemonsets"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"apps", "extensions"}, + Resources: []string{"daemonsets"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"apps", "extensions"}, + Resources: []string{"daemonsets"}, + }, + }, + }, + }, + }, { + Name: "pods", + Actions: []models.Action{ + {Name: "terminal", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"pod/shell"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"*"}, + Resources: []string{"pods"}, + }, + }, + }, + }, + }, + { + Name: "services", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"list", "get"}, + APIGroups: []string{""}, + Resources: []string{"services"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{""}, + Resources: []string{"services"}, + }, + }, + }, + + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{""}, + Resources: []string{"services"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{""}, + Resources: []string{"services"}, + }, + }, + }, + }, + }, + { + Name: "internet", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"router"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"router"}, + }, + }, + }, + {Name: 
"edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"router"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"kubesphere.io"}, + Resources: []string{"router"}, + }, + }, + }, + }, + }, + + { + Name: "routes", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"extensions"}, + Resources: []string{"ingresses"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"extensions"}, + Resources: []string{"ingresses"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"extensions"}, + Resources: []string{"ingresses"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"extensions"}, + Resources: []string{"ingresses"}, + }, + }, + }, + }, + }, { + Name: "volumes", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{""}, + Resources: []string{"persistentvolumeclaims"}, + }, + }, + }, + {Name: "create", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{""}, + Resources: []string{"persistentvolumeclaims"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{""}, + Resources: []string{"persistentvolumeclaims"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{""}, + Resources: []string{"persistentvolumeclaims"}, + }, + }, + }, + }, + }, { + Name: "applications", + Actions: []models.Action{ + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{"kubesphere.io"}, + 
ResourceNames: []string{"applications"}, + Resources: []string{"resources"}, + }, + { + Verbs: []string{"list"}, + APIGroups: []string{"openpitrix.io"}, + Resources: []string{"repos", "app_versions"}, + }, { + Verbs: []string{"get"}, + APIGroups: []string{"openpitrix.io"}, + Resources: []string{"app_version/*"}, + }, + }, + }, + {Name: "edit", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"openpitrix.io"}, + Resources: []string{"apps"}, + }, + }, + }, + {Name: "delete", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"openpitrix.io"}, + ResourceNames: []string{"delete"}, + Resources: []string{"clusters"}, + }, + }, + }, + }, + }, + { + Name: "jobs", + Actions: []models.Action{ + {Name: "view", Rules: []v1.PolicyRule{ + { + Verbs: []string{"view", "list"}, + APIGroups: []string{"batch"}, + Resources: []string{"jobs"}, + }, + }}, + {Name: "create", Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"batch"}, + Resources: []string{"jobs"}, + }, + }}, + {Name: "edit", Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"batch"}, + Resources: []string{"jobs"}, + }, + }}, + {Name: "delete", Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{"batch"}, + Resources: []string{"jobs"}, + }, + }}, + }, + }, + { + Name: "cronjobs", + Actions: []models.Action{ + {Name: "view", Rules: []v1.PolicyRule{ + { + Verbs: []string{"view", "list"}, + APIGroups: []string{"batch"}, + Resources: []string{"cronjobs"}, + }, + }}, + {Name: "create", Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{"batch"}, + Resources: []string{"cronjobs"}, + }, + }}, + {Name: "edit", Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{"batch"}, + Resources: []string{"cronjobs"}, + }, + }}, + {Name: "delete", Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: 
[]string{"batch"}, + Resources: []string{"cronjobs"}, + }, + }}, + }, + }, + { + Name: "secrets", + Actions: []models.Action{ + {Name: "view", Rules: []v1.PolicyRule{ + { + Verbs: []string{"view", "list"}, + APIGroups: []string{""}, + Resources: []string{"secrets"}, + }, + }}, + {Name: "create", Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{""}, + Resources: []string{"secrets"}, + }, + }}, + {Name: "edit", Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{""}, + Resources: []string{"secrets"}, + }, + }}, + {Name: "delete", Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{""}, + Resources: []string{"secrets"}, + }, + }}, + }, + }, + { + Name: "configmaps", + Actions: []models.Action{ + {Name: "view", Rules: []v1.PolicyRule{ + { + Verbs: []string{"view", "list"}, + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + }, + }}, + {Name: "create", Rules: []v1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + }, + }}, + {Name: "edit", Rules: []v1.PolicyRule{ + { + Verbs: []string{"update", "patch"}, + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + }, + }}, + {Name: "delete", Rules: []v1.PolicyRule{ + { + Verbs: []string{"delete"}, + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + }, + }}, + }, + }, + } +) diff --git a/pkg/models/iam/types.go b/pkg/models/iam/types.go deleted file mode 100644 index f20248ec9..000000000 --- a/pkg/models/iam/types.go +++ /dev/null @@ -1,39 +0,0 @@ -package iam - -import ( - "k8s.io/api/rbac/v1" -) - -type Action struct { - Name string `json:"name"` - Rules []v1.PolicyRule `json:"rules"` -} - -type Rule struct { - Name string `json:"name"` - Actions []Action `json:"actions"` -} - -type SimpleRule struct { - Name string `json:"name"` - Actions []string `json:"actions"` -} - -type User struct { - Username string `json:"username"` - Groups 
[]string `json:"groups"` - Password string `json:"password,omitempty"` - AvatarUrl string `json:"avatar_url"` - Description string `json:"description"` - Email string `json:"email"` - LastLoginTime string `json:"last_login_time"` - Status int `json:"status"` - ClusterRole string `json:"cluster_role"` - ClusterRules []SimpleRule `json:"cluster_rules,omitempty"` - Roles map[string]string `json:"roles,omitempty"` - Rules map[string][]SimpleRule `json:"rules,omitempty"` - Role string `json:"role,omitempty"` - WorkspaceRoles map[string]string `json:"workspace_roles,omitempty"` - WorkspaceRole string `json:"workspace_role,omitempty"` - WorkspaceRules map[string][]SimpleRule `json:"workspace_rules,omitempty"` -} diff --git a/pkg/models/kubeconfig/kubeconfig.go b/pkg/models/kubeconfig/kubeconfig.go index 1cf18b234..cc51055c5 100644 --- a/pkg/models/kubeconfig/kubeconfig.go +++ b/pkg/models/kubeconfig/kubeconfig.go @@ -258,7 +258,7 @@ func CreateKubeConfig(user string) error { configMap := v1.ConfigMap{TypeMeta: metaV1.TypeMeta{Kind: "Configmap", APIVersion: "v1"}, ObjectMeta: metaV1.ObjectMeta{Name: user}, Data: data} _, err = k8sClient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Create(&configMap) if err != nil && !errors.IsAlreadyExists(err) { - glog.Errorf("create user %s's kubeConfig failed, reason: %s", user, err) + glog.Errorf("create user %s's kubeConfig failed, reason: %v", user, err) return err } } @@ -271,7 +271,7 @@ func GetKubeConfig(user string) (string, error) { k8sClient := client.K8sClient() configMap, err := k8sClient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Get(user, metaV1.GetOptions{}) if err != nil { - glog.Errorf("cannot get user %s's kubeConfig, reason: %s", user, err) + glog.Errorf("cannot get user %s's kubeConfig, reason: %v", user, err) return "", err } return configMap.Data[kubectlConfigKey], nil @@ -287,7 +287,7 @@ func DelKubeConfig(user string) error { deletePolicy := metaV1.DeletePropagationBackground err = 
k8sClient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Delete(user, &metaV1.DeleteOptions{PropagationPolicy: &deletePolicy}) if err != nil { - glog.Errorf("delete user %s's kubeConfig failed, reason: %s", user, err) + glog.Errorf("delete user %s's kubeConfig failed, reason: %v", user, err) return err } return nil diff --git a/pkg/models/kubectl/kubectl.go b/pkg/models/kubectl/kubectl.go index 4ed3a5528..2f680b3ed 100644 --- a/pkg/models/kubectl/kubectl.go +++ b/pkg/models/kubectl/kubectl.go @@ -20,6 +20,7 @@ package kubectl import ( "fmt" + "kubesphere.io/kubesphere/pkg/models" "math/rand" "github.com/golang/glog" @@ -38,18 +39,12 @@ const ( namespace = constants.KubeSphereControlNamespace ) -type PodInfo struct { - Namespace string `json:"namespace"` - Pod string `json:"pod"` - Container string `json:"container"` -} - -func GetKubectlPod(username string) (PodInfo, error) { +func GetKubectlPod(username string) (models.PodInfo, error) { k8sClient := client.K8sClient() deploy, err := k8sClient.AppsV1beta2().Deployments(namespace).Get(username, metav1.GetOptions{}) if err != nil { glog.Errorln(err) - return PodInfo{}, err + return models.PodInfo{}, err } selectors := deploy.Spec.Selector.MatchLabels @@ -57,16 +52,16 @@ func GetKubectlPod(username string) (PodInfo, error) { podList, err := k8sClient.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: labelSelector}) if err != nil { glog.Errorln(err) - return PodInfo{}, err + return models.PodInfo{}, err } pod, err := selectCorrectPod(namespace, podList.Items) if err != nil { glog.Errorln(err) - return PodInfo{}, err + return models.PodInfo{}, err } - info := PodInfo{Namespace: pod.Namespace, Pod: pod.Name, Container: pod.Status.ContainerStatuses[0].Name} + info := models.PodInfo{Namespace: pod.Namespace, Pod: pod.Name, Container: pod.Status.ContainerStatuses[0].Name} return info, nil diff --git a/pkg/models/metrics/metrics.go b/pkg/models/metrics/metrics.go index 5c1598bc5..f89d79f87 100644 --- 
a/pkg/models/metrics/metrics.go +++ b/pkg/models/metrics/metrics.go @@ -20,14 +20,13 @@ package metrics import ( "fmt" + "kubesphere.io/kubesphere/pkg/models" "net/url" "regexp" "strings" "sync" "time" - v12 "k8s.io/client-go/listers/core/v1" - "kubesphere.io/kubesphere/pkg/constants" "kubesphere.io/kubesphere/pkg/informers" "kubesphere.io/kubesphere/pkg/models/components" @@ -50,7 +49,6 @@ import ( var ( jsonIter = jsoniter.ConfigCompatibleWithStandardLibrary nodeStatusDelLabel = []string{"endpoint", "instance", "job", "namespace", "pod", "service"} - nodeLister v12.NodeLister ) const ( @@ -126,10 +124,6 @@ type OneComponentStatus struct { Error string `json:"error,omitempty"` } -func init() { - nodeLister = informers.SharedInformerFactory().Core().V1().Nodes().Lister() -} - func getAllWorkspaceNames(formatedMetric *FormatedMetric) map[string]int { var wsMap = make(map[string]int) @@ -197,12 +191,12 @@ func unifyMetricHistoryTimeRange(fmtMetrics *FormatedMetric) { var timestampMap = make(map[float64]bool) if fmtMetrics.Data.ResultType == ResultTypeMatrix { - for i, _ := range fmtMetrics.Data.Result { + for i := range fmtMetrics.Data.Result { values, exist := fmtMetrics.Data.Result[i][ResultItemValues] if exist { valueArray, sure := values.([]interface{}) if sure { - for j, _ := range valueArray { + for j := range valueArray { timeAndValue := valueArray[j].([]interface{}) timestampMap[float64(timeAndValue[0].(uint64))] = true } @@ -213,7 +207,7 @@ func unifyMetricHistoryTimeRange(fmtMetrics *FormatedMetric) { timestampArray := make([]float64, len(timestampMap)) i := 0 - for timestamp, _ := range timestampMap { + for timestamp := range timestampMap { timestampArray[i] = timestamp i++ } @@ -230,7 +224,7 @@ func unifyMetricHistoryTimeRange(fmtMetrics *FormatedMetric) { formatValueArray := make([][]interface{}, len(timestampArray)) j := 0 - for k, _ := range timestampArray { + for k := range timestampArray { valueItem, sure := valueArray[j].([]interface{}) if sure && 
float64(valueItem[0].(uint64)) == timestampArray[k] { formatValueArray[k] = []interface{}{int64(timestampArray[k]), valueItem[1]} @@ -294,7 +288,7 @@ func GetMetric(queryType, params, metricName string) *FormatedMetric { } func GetNodeAddressInfo() *map[string][]v1.NodeAddress { - + nodeLister := informers.SharedInformerFactory().Core().V1().Nodes().Lister() nodes, err := nodeLister.List(labels.Everything()) if err != nil { @@ -938,7 +932,7 @@ func MonitorComponentStatus(monitoringRequest *client.MonitoringRequestParams) * nsStatus, exist := Components[ns] if exist { for _, nsStatusItem := range nsStatus.(map[string]interface{}) { - component := nsStatusItem.(components.Component) + component := nsStatusItem.(models.Component) namespaceComponentTotalMap[ns] += 1 if component.HealthyBackends != 0 && component.HealthyBackends == component.TotalBackends { namespaceComponentHealthyMap[ns] += 1 diff --git a/pkg/models/nodes/nodes.go b/pkg/models/nodes/nodes.go index 0be4ef030..d85715452 100644 --- a/pkg/models/nodes/nodes.go +++ b/pkg/models/nodes/nodes.go @@ -33,7 +33,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "kubesphere.io/kubesphere/pkg/client" - "kubesphere.io/kubesphere/pkg/errors" ) func DrainNode(nodename string) (err error) { @@ -45,7 +44,7 @@ func DrainNode(nodename string) (err error) { } if node.Spec.Unschedulable { - return errors.New(errors.Conflict, fmt.Sprintf("node %s have been drained", nodename)) + return fmt.Errorf("node %s have been drained", nodename) } data := []byte(" {\"spec\":{\"unschedulable\":true}}") diff --git a/pkg/models/quotas/quotas.go b/pkg/models/quotas/quotas.go index 2fe92ea0b..70a55ac3e 100644 --- a/pkg/models/quotas/quotas.go +++ b/pkg/models/quotas/quotas.go @@ -23,10 +23,10 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" - v12 "k8s.io/client-go/listers/core/v1" - "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/models" 
"kubesphere.io/kubesphere/pkg/models/resources" + "kubesphere.io/kubesphere/pkg/params" ) const ( @@ -51,27 +51,17 @@ var ( statefulsetsKey: resources.StatefulSets, persistentvolumeclaimsKey: resources.PersistentVolumeClaims, podsKey: resources.Pods, namespaceKey: resources.Namespaces, storageClassesKey: resources.StorageClasses, clusterRolesKey: resources.ClusterRoles, jobsKey: resources.Jobs, cronJobsKey: resources.CronJobs} - resouceQuotaLister v12.ResourceQuotaLister ) -type ResourceQuota struct { - Namespace string `json:"namespace"` - Data v1.ResourceQuotaStatus `json:"data"` -} - func getUsage(namespace, resource string) (int, error) { - list, err := resources.ListNamespaceResource(namespace, resource, "", "", false, -1, 0) + list, err := resources.ListNamespaceResource(namespace, resource, ¶ms.Conditions{}, "", false, -1, 0) if err != nil { return 0, err } return list.TotalCount, nil } -func init() { - resouceQuotaLister = informers.SharedInformerFactory().Core().V1().ResourceQuotas().Lister() -} - -func GetClusterQuotas() (*ResourceQuota, error) { +func GetClusterQuotas() (*models.ResourceQuota, error) { quota := v1.ResourceQuotaStatus{Hard: make(v1.ResourceList), Used: make(v1.ResourceList)} @@ -85,11 +75,11 @@ func GetClusterQuotas() (*ResourceQuota, error) { quota.Used[v1.ResourceName(k)] = quantity } - return &ResourceQuota{Namespace: "\"\"", Data: quota}, nil + return &models.ResourceQuota{Namespace: "\"\"", Data: quota}, nil } -func GetNamespaceQuotas(namespace string) (*ResourceQuota, error) { +func GetNamespaceQuotas(namespace string) (*models.ResourceQuota, error) { quota, err := getNamespaceResourceQuota(namespace) if err != nil { glog.Error(err) @@ -115,7 +105,7 @@ func GetNamespaceQuotas(namespace string) (*ResourceQuota, error) { } } - return &ResourceQuota{Namespace: namespace, Data: *quota}, nil + return &models.ResourceQuota{Namespace: namespace, Data: *quota}, nil } func updateNamespaceQuota(tmpResourceList, resourceList v1.ResourceList) { 
@@ -135,7 +125,8 @@ func updateNamespaceQuota(tmpResourceList, resourceList v1.ResourceList) { } func getNamespaceResourceQuota(namespace string) (*v1.ResourceQuotaStatus, error) { - quotaList, err := resouceQuotaLister.ResourceQuotas(namespace).List(labels.Everything()) + resourceQuotaLister := informers.SharedInformerFactory().Core().V1().ResourceQuotas().Lister() + quotaList, err := resourceQuotaLister.ResourceQuotas(namespace).List(labels.Everything()) if err != nil || len(quotaList) == 0 { return nil, err } diff --git a/pkg/models/registries/registries.go b/pkg/models/registries/registries.go index 4bd9b3289..4c31b7853 100644 --- a/pkg/models/registries/registries.go +++ b/pkg/models/registries/registries.go @@ -20,12 +20,11 @@ package registries import ( "context" "encoding/base64" + "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/client" "github.com/golang/glog" - - "kubesphere.io/kubesphere/pkg/errors" ) type AuthInfo struct { @@ -62,6 +61,6 @@ func RegistryVerify(authInfo AuthInfo) error { if resp.Status == loginSuccess { return nil } else { - return errors.New(errors.VerifyFailed, resp.Status) + return fmt.Errorf(resp.Status) } } diff --git a/pkg/models/resources/clusterroles.go b/pkg/models/resources/clusterroles.go index 5ada3d82f..531d43a42 100644 --- a/pkg/models/resources/clusterroles.go +++ b/pkg/models/resources/clusterroles.go @@ -18,19 +18,19 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" rbac "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers/rbac/v1" ) type clusterRoleSearcher struct { - clusterRoleLister v1.ClusterRoleLister } -// exactly match +// exactly Match func (*clusterRoleSearcher) match(match map[string]string, item *rbac.ClusterRole) bool { for k, v := range match { switch k { @@ -45,7 +45,7 @@ func (*clusterRoleSearcher) match(match map[string]string, item *rbac.ClusterRol return true } -// fuzzy 
searchInNamespace +// Fuzzy searchInNamespace func (*clusterRoleSearcher) fuzzy(fuzzy map[string]string, item *rbac.ClusterRole) bool { for k, v := range fuzzy { switch k { @@ -86,8 +86,8 @@ func (*clusterRoleSearcher) compare(a, b *rbac.ClusterRole, orderBy string) bool } } -func (s *clusterRoleSearcher) search(conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - clusterRoles, err := s.clusterRoleLister.List(labels.Everything()) +func (s *clusterRoleSearcher) search(conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + clusterRoles, err := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister().List(labels.Everything()) if err != nil { return nil, err @@ -95,11 +95,11 @@ func (s *clusterRoleSearcher) search(conditions *conditions, orderBy string, rev result := make([]*rbac.ClusterRole, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = clusterRoles } else { for _, item := range clusterRoles { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/configmaps.go b/pkg/models/resources/configmaps.go index a0f727655..91835ff2d 100644 --- a/pkg/models/resources/configmaps.go +++ b/pkg/models/resources/configmaps.go @@ -18,20 +18,19 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" - lister "k8s.io/client-go/listers/core/v1" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" ) type configMapSearcher struct { - configMapLister lister.ConfigMapLister } -// exactly match +// exactly Match func (*configMapSearcher) match(match map[string]string, item *v1.ConfigMap) bool { for k, v := range match { switch k { @@ -46,7 +45,7 @@ func (*configMapSearcher) match(match 
map[string]string, item *v1.ConfigMap) boo return true } -// fuzzy searchInNamespace +// Fuzzy searchInNamespace func (*configMapSearcher) fuzzy(fuzzy map[string]string, item *v1.ConfigMap) bool { for k, v := range fuzzy { switch k { @@ -91,8 +90,8 @@ func (*configMapSearcher) compare(a, b *v1.ConfigMap, orderBy string) bool { } } -func (s *configMapSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - configMaps, err := s.configMapLister.ConfigMaps(namespace).List(labels.Everything()) +func (s *configMapSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + configMaps, err := informers.SharedInformerFactory().Core().V1().ConfigMaps().Lister().ConfigMaps(namespace).List(labels.Everything()) if err != nil { return nil, err @@ -100,11 +99,11 @@ func (s *configMapSearcher) search(namespace string, conditions *conditions, ord result := make([]*v1.ConfigMap, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = configMaps } else { for _, item := range configMaps { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/cronjobs.go b/pkg/models/resources/cronjobs.go index 31ac08546..1cf5b6e90 100644 --- a/pkg/models/resources/cronjobs.go +++ b/pkg/models/resources/cronjobs.go @@ -18,28 +18,28 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" - lister "k8s.io/client-go/listers/batch/v2alpha1" + "k8s.io/api/batch/v1beta1" - "k8s.io/api/batch/v2alpha1" "k8s.io/apimachinery/pkg/labels" ) type cronJobSearcher struct { - cronJobLister lister.CronJobLister } -func cronJobStatus(item *v2alpha1.CronJob) string { +func 
cronJobStatus(item *v1beta1.CronJob) string { if item.Spec.Suspend != nil && *item.Spec.Suspend { return paused } return running } -// Exactly match -func (*cronJobSearcher) match(match map[string]string, item *v2alpha1.CronJob) bool { +// Exactly Match +func (*cronJobSearcher) match(match map[string]string, item *v1beta1.CronJob) bool { for k, v := range match { switch k { case status: @@ -53,7 +53,7 @@ func (*cronJobSearcher) match(match map[string]string, item *v2alpha1.CronJob) b return true } -func (*cronJobSearcher) fuzzy(fuzzy map[string]string, item *v2alpha1.CronJob) bool { +func (*cronJobSearcher) fuzzy(fuzzy map[string]string, item *v1beta1.CronJob) bool { for k, v := range fuzzy { switch k { @@ -88,7 +88,7 @@ func (*cronJobSearcher) fuzzy(fuzzy map[string]string, item *v2alpha1.CronJob) b return true } -func (*cronJobSearcher) compare(a, b *v2alpha1.CronJob, orderBy string) bool { +func (*cronJobSearcher) compare(a, b *v1beta1.CronJob, orderBy string) bool { switch orderBy { case createTime: return a.CreationTimestamp.Time.After(b.CreationTimestamp.Time) @@ -99,20 +99,20 @@ func (*cronJobSearcher) compare(a, b *v2alpha1.CronJob, orderBy string) bool { } } -func (s *cronJobSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - cronJobs, err := s.cronJobLister.CronJobs(namespace).List(labels.Everything()) +func (s *cronJobSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + cronJobs, err := informers.SharedInformerFactory().Batch().V1beta1().CronJobs().Lister().CronJobs(namespace).List(labels.Everything()) if err != nil { return nil, err } - result := make([]*v2alpha1.CronJob, 0) + result := make([]*v1beta1.CronJob, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = cronJobs } else { for _, item := range cronJobs { - if 
s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/daemonsets.go b/pkg/models/resources/daemonsets.go index ed7621a41..c2575ba4e 100644 --- a/pkg/models/resources/daemonsets.go +++ b/pkg/models/resources/daemonsets.go @@ -18,17 +18,16 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" - lister "k8s.io/client-go/listers/apps/v1" - "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/labels" ) type daemonSetSearcher struct { - daemonSetLister lister.DaemonSetLister } func daemonSetStatus(item *v1.DaemonSet) string { @@ -41,7 +40,7 @@ func daemonSetStatus(item *v1.DaemonSet) string { } } -// Exactly match +// Exactly Match func (*daemonSetSearcher) match(match map[string]string, item *v1.DaemonSet) bool { for k, v := range match { switch k { @@ -102,8 +101,8 @@ func (*daemonSetSearcher) compare(a, b *v1.DaemonSet, orderBy string) bool { } } -func (s *daemonSetSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - daemonSets, err := s.daemonSetLister.DaemonSets(namespace).List(labels.Everything()) +func (s *daemonSetSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + daemonSets, err := informers.SharedInformerFactory().Apps().V1().DaemonSets().Lister().DaemonSets(namespace).List(labels.Everything()) if err != nil { return nil, err @@ -111,11 +110,11 @@ func (s *daemonSetSearcher) search(namespace string, conditions *conditions, ord result := make([]*v1.DaemonSet, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = daemonSets } else { for _, item := range daemonSets { - if s.match(conditions.match, item) && 
s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/deployments.go b/pkg/models/resources/deployments.go index 1e627dee3..2e67d3c42 100644 --- a/pkg/models/resources/deployments.go +++ b/pkg/models/resources/deployments.go @@ -18,18 +18,17 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" - lister "k8s.io/client-go/listers/apps/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/api/apps/v1" ) type deploymentSearcher struct { - deploymentLister lister.DeploymentLister } func deploymentStatus(item *v1.Deployment) string { @@ -45,7 +44,7 @@ func deploymentStatus(item *v1.Deployment) string { return stopped } -// Exactly match +// Exactly Match func (*deploymentSearcher) match(match map[string]string, item *v1.Deployment) bool { for k, v := range match { switch k { @@ -106,8 +105,8 @@ func (*deploymentSearcher) compare(a, b *v1.Deployment, orderBy string) bool { } } -func (s *deploymentSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - deployments, err := s.deploymentLister.Deployments(namespace).List(labels.Everything()) +func (s *deploymentSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + deployments, err := informers.SharedInformerFactory().Apps().V1().Deployments().Lister().Deployments(namespace).List(labels.Everything()) if err != nil { return nil, err @@ -115,11 +114,11 @@ func (s *deploymentSearcher) search(namespace string, conditions *conditions, or result := make([]*v1.Deployment, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = deployments } else { for _, item := range deployments { - if s.match(conditions.match, item) && 
s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/ingresses.go b/pkg/models/resources/ingresses.go index d1ae7b557..83b5dd051 100644 --- a/pkg/models/resources/ingresses.go +++ b/pkg/models/resources/ingresses.go @@ -18,21 +18,20 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" - lister "k8s.io/client-go/listers/extensions/v1beta1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/labels" ) type ingressSearcher struct { - ingressLister lister.IngressLister } -// exactly match +// exactly Match func (*ingressSearcher) match(match map[string]string, item *extensions.Ingress) bool { for k, v := range match { switch k { @@ -47,7 +46,7 @@ func (*ingressSearcher) match(match map[string]string, item *extensions.Ingress) return true } -// fuzzy searchInNamespace +// Fuzzy searchInNamespace func (*ingressSearcher) fuzzy(fuzzy map[string]string, item *extensions.Ingress) bool { for k, v := range fuzzy { switch k { @@ -92,8 +91,8 @@ func (*ingressSearcher) compare(a, b *extensions.Ingress, orderBy string) bool { } } -func (s *ingressSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - ingresses, err := s.ingressLister.Ingresses(namespace).List(labels.Everything()) +func (s *ingressSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + ingresses, err := informers.SharedInformerFactory().Extensions().V1beta1().Ingresses().Lister().Ingresses(namespace).List(labels.Everything()) if err != nil { return nil, err @@ -101,11 +100,11 @@ func (s *ingressSearcher) search(namespace string, conditions *conditions, order result := make([]*extensions.Ingress, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if 
len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = ingresses } else { for _, item := range ingresses { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/jobs.go b/pkg/models/resources/jobs.go index faaaa1512..25862bdc0 100644 --- a/pkg/models/resources/jobs.go +++ b/pkg/models/resources/jobs.go @@ -18,37 +18,36 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" "time" - lister "k8s.io/client-go/listers/batch/v1" - - batchV1 "k8s.io/api/batch/v1" - coreV1 "k8s.io/api/core/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" ) type jobSearcher struct { - jobLister lister.JobLister } -func jobStatus(item *batchV1.Job) string { +func jobStatus(item *batchv1.Job) string { status := "" for _, condition := range item.Status.Conditions { - if condition.Type == batchV1.JobFailed && condition.Status == coreV1.ConditionTrue { + if condition.Type == batchv1.JobFailed && condition.Status == corev1.ConditionTrue { status = failed } - if condition.Type == batchV1.JobComplete && condition.Status == coreV1.ConditionTrue { + if condition.Type == batchv1.JobComplete && condition.Status == corev1.ConditionTrue { status = complete } } return status } -// Exactly match -func (*jobSearcher) match(match map[string]string, item *batchV1.Job) bool { +// Exactly Match +func (*jobSearcher) match(match map[string]string, item *batchv1.Job) bool { for k, v := range match { switch k { case status: @@ -62,7 +61,7 @@ func (*jobSearcher) match(match map[string]string, item *batchV1.Job) bool { return true } -func (*jobSearcher) fuzzy(fuzzy map[string]string, item *batchV1.Job) bool { +func (*jobSearcher) fuzzy(fuzzy map[string]string, item *batchv1.Job) bool { for k, v := range fuzzy { switch k { 
@@ -97,7 +96,7 @@ func (*jobSearcher) fuzzy(fuzzy map[string]string, item *batchV1.Job) bool { return true } -func jobUpdateTime(item *batchV1.Job) time.Time { +func jobUpdateTime(item *batchv1.Job) time.Time { updateTime := item.CreationTimestamp.Time for _, condition := range item.Status.Conditions { if updateTime.Before(condition.LastProbeTime.Time) { @@ -110,7 +109,7 @@ func jobUpdateTime(item *batchV1.Job) time.Time { return updateTime } -func (*jobSearcher) compare(a, b *batchV1.Job, orderBy string) bool { +func (*jobSearcher) compare(a, b *batchv1.Job, orderBy string) bool { switch orderBy { case updateTime: return jobUpdateTime(a).After(jobUpdateTime(b)) @@ -121,20 +120,20 @@ func (*jobSearcher) compare(a, b *batchV1.Job, orderBy string) bool { } } -func (s *jobSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - jobs, err := s.jobLister.Jobs(namespace).List(labels.Everything()) +func (s *jobSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + jobs, err := informers.SharedInformerFactory().Batch().V1().Jobs().Lister().Jobs(namespace).List(labels.Everything()) if err != nil { return nil, err } - result := make([]*batchV1.Job, 0) + result := make([]*batchv1.Job, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = jobs } else { for _, item := range jobs { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/namespaces.go b/pkg/models/resources/namespaces.go index c96b8eec7..6003ad3e0 100644 --- a/pkg/models/resources/namespaces.go +++ b/pkg/models/resources/namespaces.go @@ -18,19 +18,19 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + 
"kubesphere.io/kubesphere/pkg/params" "sort" "strings" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - lister "k8s.io/client-go/listers/core/v1" ) type namespaceSearcher struct { - namespaceLister lister.NamespaceLister } -// exactly match +// exactly Match func (*namespaceSearcher) match(match map[string]string, item *v1.Namespace) bool { for k, v := range match { switch k { @@ -45,7 +45,7 @@ func (*namespaceSearcher) match(match map[string]string, item *v1.Namespace) boo return true } -// fuzzy searchInNamespace +// Fuzzy searchInNamespace func (*namespaceSearcher) fuzzy(fuzzy map[string]string, item *v1.Namespace) bool { for k, v := range fuzzy { switch k { @@ -90,8 +90,8 @@ func (*namespaceSearcher) compare(a, b *v1.Namespace, orderBy string) bool { } } -func (s *namespaceSearcher) search(conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - namespaces, err := s.namespaceLister.List(labels.Everything()) +func (s *namespaceSearcher) search(conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + namespaces, err := informers.SharedInformerFactory().Core().V1().Namespaces().Lister().List(labels.Everything()) if err != nil { return nil, err @@ -99,11 +99,11 @@ func (s *namespaceSearcher) search(conditions *conditions, orderBy string, rever result := make([]*v1.Namespace, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = namespaces } else { for _, item := range namespaces { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/nodes.go b/pkg/models/resources/nodes.go index ebd7d9468..a15ce0561 100644 --- a/pkg/models/resources/nodes.go +++ b/pkg/models/resources/nodes.go @@ -18,19 +18,19 @@ package resources import ( + 
"kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - lister "k8s.io/client-go/listers/core/v1" ) type nodeSearcher struct { - nodeLister lister.NodeLister } -// exactly match +// exactly Match func (*nodeSearcher) match(match map[string]string, item *v1.Node) bool { for k, v := range match { switch k { @@ -45,7 +45,7 @@ func (*nodeSearcher) match(match map[string]string, item *v1.Node) bool { return true } -// fuzzy searchInNamespace +// Fuzzy searchInNamespace func (*nodeSearcher) fuzzy(fuzzy map[string]string, item *v1.Node) bool { for k, v := range fuzzy { switch k { @@ -90,8 +90,8 @@ func (*nodeSearcher) compare(a, b *v1.Node, orderBy string) bool { } } -func (s *nodeSearcher) search(conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - nodes, err := s.nodeLister.List(labels.Everything()) +func (s *nodeSearcher) search(conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + nodes, err := informers.SharedInformerFactory().Core().V1().Nodes().Lister().List(labels.Everything()) if err != nil { return nil, err @@ -99,11 +99,11 @@ func (s *nodeSearcher) search(conditions *conditions, orderBy string, reverse bo result := make([]*v1.Node, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = nodes } else { for _, item := range nodes { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/persistentvolumeclaims.go b/pkg/models/resources/persistentvolumeclaims.go index 7160bdf11..ca8735e9f 100644 --- a/pkg/models/resources/persistentvolumeclaims.go +++ b/pkg/models/resources/persistentvolumeclaims.go @@ -18,20 +18,19 @@ package resources import ( + 
"kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" - lister "k8s.io/client-go/listers/core/v1" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" ) type persistentVolumeClaimSearcher struct { - persistentVolumeClaimLister lister.PersistentVolumeClaimLister } -// exactly match +// exactly Match func (*persistentVolumeClaimSearcher) match(match map[string]string, item *v1.PersistentVolumeClaim) bool { for k, v := range match { switch k { @@ -46,7 +45,7 @@ func (*persistentVolumeClaimSearcher) match(match map[string]string, item *v1.Pe return true } -// fuzzy searchInNamespace +// Fuzzy searchInNamespace func (*persistentVolumeClaimSearcher) fuzzy(fuzzy map[string]string, item *v1.PersistentVolumeClaim) bool { for k, v := range fuzzy { switch k { @@ -91,8 +90,8 @@ func (*persistentVolumeClaimSearcher) compare(a, b *v1.PersistentVolumeClaim, or } } -func (s *persistentVolumeClaimSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - persistentVolumeClaims, err := s.persistentVolumeClaimLister.PersistentVolumeClaims(namespace).List(labels.Everything()) +func (s *persistentVolumeClaimSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + persistentVolumeClaims, err := informers.SharedInformerFactory().Core().V1().PersistentVolumeClaims().Lister().PersistentVolumeClaims(namespace).List(labels.Everything()) if err != nil { return nil, err @@ -100,11 +99,11 @@ func (s *persistentVolumeClaimSearcher) search(namespace string, conditions *con result := make([]*v1.PersistentVolumeClaim, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = persistentVolumeClaims } else { for _, item := range persistentVolumeClaims { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if 
s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/pods.go b/pkg/models/resources/pods.go index 4190aebf9..857bd6ab0 100644 --- a/pkg/models/resources/pods.go +++ b/pkg/models/resources/pods.go @@ -18,20 +18,19 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" - v12 "k8s.io/client-go/listers/core/v1" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" ) type podSearcher struct { - podLister v12.PodLister } -// exactly match +// exactly Match func (*podSearcher) match(match map[string]string, item *v1.Pod) bool { for k, v := range match { switch k { @@ -46,7 +45,7 @@ func (*podSearcher) match(match map[string]string, item *v1.Pod) bool { return true } -// fuzzy searchInNamespace +// Fuzzy searchInNamespace func (*podSearcher) fuzzy(fuzzy map[string]string, item *v1.Pod) bool { for k, v := range fuzzy { switch k { @@ -91,9 +90,9 @@ func (*podSearcher) compare(a, b *v1.Pod, orderBy string) bool { } } -func (s *podSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { +func (s *podSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { - pods, err := s.podLister.Pods(namespace).List(labels.Everything()) + pods, err := informers.SharedInformerFactory().Core().V1().Pods().Lister().Pods(namespace).List(labels.Everything()) if err != nil { return nil, err @@ -101,11 +100,11 @@ func (s *podSearcher) search(namespace string, conditions *conditions, orderBy s result := make([]*v1.Pod, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = pods } else { for _, item := range pods { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && 
s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/resources.go b/pkg/models/resources/resources.go index 7cb1a86b2..9e4311d7f 100644 --- a/pkg/models/resources/resources.go +++ b/pkg/models/resources/resources.go @@ -18,74 +18,35 @@ package resources import ( - "regexp" + "fmt" + "kubesphere.io/kubesphere/pkg/models" + "kubesphere.io/kubesphere/pkg/params" "strings" - - "kubesphere.io/kubesphere/pkg/informers" - - "kubesphere.io/kubesphere/pkg/errors" ) func init() { - namespacedResources[ConfigMaps] = &configMapSearcher{ - configMapLister: informers.SharedInformerFactory().Core().V1().ConfigMaps().Lister(), - } - namespacedResources[CronJobs] = &cronJobSearcher{ - cronJobLister: informers.SharedInformerFactory().Batch().V2alpha1().CronJobs().Lister(), - } - namespacedResources[DaemonSets] = &daemonSetSearcher{ - daemonSetLister: informers.SharedInformerFactory().Apps().V1().DaemonSets().Lister(), - } - namespacedResources[Deployments] = &deploymentSearcher{ - deploymentLister: informers.SharedInformerFactory().Apps().V1().Deployments().Lister(), - } - namespacedResources[Ingresses] = &ingressSearcher{ - ingressLister: informers.SharedInformerFactory().Extensions().V1beta1().Ingresses().Lister(), - } - namespacedResources[Jobs] = &jobSearcher{ - jobLister: informers.SharedInformerFactory().Batch().V1().Jobs().Lister(), - } - namespacedResources[PersistentVolumeClaims] = &persistentVolumeClaimSearcher{ - persistentVolumeClaimLister: informers.SharedInformerFactory().Core().V1().PersistentVolumeClaims().Lister(), - } - namespacedResources[Secrets] = &secretSearcher{ - secretLister: informers.SharedInformerFactory().Core().V1().Secrets().Lister(), - } - namespacedResources[Services] = &serviceSearcher{ - serviceLister: informers.SharedInformerFactory().Core().V1().Services().Lister(), - } - namespacedResources[StatefulSets] = &statefulSetSearcher{ - statefulSetLister: 
informers.SharedInformerFactory().Apps().V1().StatefulSets().Lister(), - } - namespacedResources[Pods] = &podSearcher{ - podLister: informers.SharedInformerFactory().Core().V1().Pods().Lister(), - } - namespacedResources[Roles] = &roleSearcher{ - roleLister: informers.SharedInformerFactory().Rbac().V1().Roles().Lister(), - } + namespacedResources[ConfigMaps] = &configMapSearcher{} + namespacedResources[CronJobs] = &cronJobSearcher{} + namespacedResources[DaemonSets] = &daemonSetSearcher{} + namespacedResources[Deployments] = &deploymentSearcher{} + namespacedResources[Ingresses] = &ingressSearcher{} + namespacedResources[Jobs] = &jobSearcher{} + namespacedResources[PersistentVolumeClaims] = &persistentVolumeClaimSearcher{} + namespacedResources[Secrets] = &secretSearcher{} + namespacedResources[Services] = &serviceSearcher{} + namespacedResources[StatefulSets] = &statefulSetSearcher{} + namespacedResources[Pods] = &podSearcher{} + namespacedResources[Roles] = &roleSearcher{} - clusterResources[Nodes] = &nodeSearcher{ - nodeLister: informers.SharedInformerFactory().Core().V1().Nodes().Lister(), - } - clusterResources[Namespaces] = &namespaceSearcher{ - namespaceLister: informers.SharedInformerFactory().Core().V1().Namespaces().Lister(), - } - clusterResources[ClusterRoles] = &clusterRoleSearcher{ - clusterRoleLister: informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister(), - } - clusterResources[StorageClasses] = &storageClassesSearcher{ - storageClassesLister: informers.SharedInformerFactory().Storage().V1().StorageClasses().Lister(), - } + clusterResources[Nodes] = &nodeSearcher{} + clusterResources[Namespaces] = &namespaceSearcher{} + clusterResources[ClusterRoles] = &clusterRoleSearcher{} + clusterResources[StorageClasses] = &storageClassesSearcher{} } var namespacedResources = make(map[string]namespacedSearcherInterface) var clusterResources = make(map[string]clusterSearcherInterface) -type conditions struct { - match map[string]string - fuzzy 
map[string]string -} - const ( name = "name" label = "label" @@ -123,33 +84,26 @@ const ( ) type namespacedSearcherInterface interface { - search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) + search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) } type clusterSearcherInterface interface { - search(conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) + search(conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) } -func ListNamespaceResource(namespace, resource, conditionStr, orderBy string, reverse bool, limit, offset int) (*ResourceList, error) { +func ListNamespaceResource(namespace, resource string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { items := make([]interface{}, 0) total := 0 var err error - - conditions, err := parseToConditions(conditionStr) - - if err != nil { - return nil, err - } - var result []interface{} if searcher, ok := namespacedResources[resource]; ok { result, err = searcher.search(namespace, conditions, orderBy, reverse) } else { - return nil, errors.New(errors.NotImplement, "not support") + return nil, fmt.Errorf("not support") } if err != nil { - return nil, errors.New(errors.Internal, err.Error()) + return nil, err } total = len(result) @@ -160,16 +114,14 @@ func ListNamespaceResource(namespace, resource, conditionStr, orderBy string, re } } - return &ResourceList{TotalCount: total, Items: items}, nil + return &models.PageableResponse{TotalCount: total, Items: items}, nil } -func ListClusterResource(resource, conditionStr, orderBy string, reverse bool, limit, offset int) (*ResourceList, error) { +func ListClusterResource(resource string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { items := make([]interface{}, 0) total := 0 var err error - 
conditions, err := parseToConditions(conditionStr) - if err != nil { return nil, err } @@ -179,11 +131,11 @@ func ListClusterResource(resource, conditionStr, orderBy string, reverse bool, l if searcher, ok := clusterResources[resource]; ok { result, err = searcher.search(conditions, orderBy, reverse) } else { - return nil, errors.New(errors.NotImplement, "not support") + return nil, fmt.Errorf("not support") } if err != nil { - return nil, errors.New(errors.Internal, err.Error()) + return nil, err } total = len(result) @@ -194,36 +146,7 @@ func ListClusterResource(resource, conditionStr, orderBy string, reverse bool, l } } - return &ResourceList{TotalCount: total, Items: items}, nil -} - -func parseToConditions(str string) (*conditions, error) { - conditions := &conditions{match: make(map[string]string, 0), fuzzy: make(map[string]string, 0)} - - if str == "" { - return conditions, nil - } - - for _, item := range strings.Split(str, ",") { - if strings.Count(item, "=") > 1 || strings.Count(item, "~") > 1 { - return nil, errors.New(errors.InvalidArgument, "invalid condition") - } - if groups := regexp.MustCompile(`(\S+)([=~])(\S+)`).FindStringSubmatch(item); len(groups) == 4 { - if groups[2] == "=" { - conditions.match[groups[1]] = groups[3] - } else { - conditions.fuzzy[groups[1]] = groups[3] - } - } else { - return nil, errors.New(errors.InvalidArgument, "invalid condition") - } - } - return conditions, nil -} - -type ResourceList struct { - TotalCount int `json:"total_count"` - Items []interface{} `json:"items"` + return &models.PageableResponse{TotalCount: total, Items: items}, nil } func searchFuzzy(m map[string]string, key, value string) bool { diff --git a/pkg/models/resources/roles.go b/pkg/models/resources/roles.go index c4c0bf3c6..94ae267cc 100644 --- a/pkg/models/resources/roles.go +++ b/pkg/models/resources/roles.go @@ -18,20 +18,19 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" 
"strings" rbac "k8s.io/api/rbac/v1" - lister "k8s.io/client-go/listers/rbac/v1" - "k8s.io/apimachinery/pkg/labels" ) type roleSearcher struct { - roleLister lister.RoleLister } -// exactly match +// exactly Match func (*roleSearcher) match(match map[string]string, item *rbac.Role) bool { for k, v := range match { switch k { @@ -46,7 +45,7 @@ func (*roleSearcher) match(match map[string]string, item *rbac.Role) bool { return true } -// fuzzy searchInNamespace +// Fuzzy searchInNamespace func (*roleSearcher) fuzzy(fuzzy map[string]string, item *rbac.Role) bool { for k, v := range fuzzy { switch k { @@ -87,8 +86,8 @@ func (*roleSearcher) compare(a, b *rbac.Role, orderBy string) bool { } } -func (s *roleSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - roles, err := s.roleLister.Roles(namespace).List(labels.Everything()) +func (s *roleSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + roles, err := informers.SharedInformerFactory().Rbac().V1().Roles().Lister().Roles(namespace).List(labels.Everything()) if err != nil { return nil, err @@ -96,11 +95,11 @@ func (s *roleSearcher) search(namespace string, conditions *conditions, orderBy result := make([]*rbac.Role, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = roles } else { for _, item := range roles { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/secrets.go b/pkg/models/resources/secrets.go index b5a88998e..8c7fc13b2 100644 --- a/pkg/models/resources/secrets.go +++ b/pkg/models/resources/secrets.go @@ -18,20 +18,19 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" 
"sort" "strings" - lister "k8s.io/client-go/listers/core/v1" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" ) type secretSearcher struct { - secretLister lister.SecretLister } -// exactly match +// exactly Match func (*secretSearcher) match(match map[string]string, item *v1.Secret) bool { for k, v := range match { switch k { @@ -50,7 +49,7 @@ func (*secretSearcher) match(match map[string]string, item *v1.Secret) bool { return true } -// fuzzy searchInNamespace +// Fuzzy searchInNamespace func (*secretSearcher) fuzzy(fuzzy map[string]string, item *v1.Secret) bool { for k, v := range fuzzy { switch k { @@ -95,8 +94,8 @@ func (*secretSearcher) compare(a, b *v1.Secret, orderBy string) bool { } } -func (s *secretSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - secrets, err := s.secretLister.Secrets(namespace).List(labels.Everything()) +func (s *secretSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + secrets, err := informers.SharedInformerFactory().Core().V1().Secrets().Lister().Secrets(namespace).List(labels.Everything()) if err != nil { return nil, err @@ -104,11 +103,11 @@ func (s *secretSearcher) search(namespace string, conditions *conditions, orderB result := make([]*v1.Secret, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = secrets } else { for _, item := range secrets { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/services.go b/pkg/models/resources/services.go index 1203e8dc5..7f9b05342 100644 --- a/pkg/models/resources/services.go +++ b/pkg/models/resources/services.go @@ -18,20 +18,19 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" 
+ "kubesphere.io/kubesphere/pkg/params" "sort" "strings" - lister "k8s.io/client-go/listers/core/v1" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" ) type serviceSearcher struct { - serviceLister lister.ServiceLister } -// exactly match +// exactly Match func (*serviceSearcher) match(match map[string]string, item *v1.Service) bool { for k, v := range match { switch k { @@ -46,7 +45,7 @@ func (*serviceSearcher) match(match map[string]string, item *v1.Service) bool { return true } -// fuzzy searchInNamespace +// Fuzzy searchInNamespace func (*serviceSearcher) fuzzy(fuzzy map[string]string, item *v1.Service) bool { for k, v := range fuzzy { switch k { @@ -91,8 +90,8 @@ func (*serviceSearcher) compare(a, b *v1.Service, orderBy string) bool { } } -func (s *serviceSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - services, err := s.serviceLister.Services(namespace).List(labels.Everything()) +func (s *serviceSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + services, err := informers.SharedInformerFactory().Core().V1().Services().Lister().Services(namespace).List(labels.Everything()) if err != nil { return nil, err @@ -100,11 +99,11 @@ func (s *serviceSearcher) search(namespace string, conditions *conditions, order result := make([]*v1.Service, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = services } else { for _, item := range services { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/statefulsets.go b/pkg/models/resources/statefulsets.go index 628431f3f..a5b1bde0e 100644 --- a/pkg/models/resources/statefulsets.go +++ b/pkg/models/resources/statefulsets.go @@ -18,17 
+18,16 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" - lister "k8s.io/client-go/listers/apps/v1" - "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/labels" ) type statefulSetSearcher struct { - statefulSetLister lister.StatefulSetLister } func statefulSetStatus(item *v1.StatefulSet) string { @@ -44,7 +43,7 @@ func statefulSetStatus(item *v1.StatefulSet) string { return stopped } -// Exactly match +// Exactly Match func (*statefulSetSearcher) match(match map[string]string, item *v1.StatefulSet) bool { for k, v := range match { switch k { @@ -105,8 +104,8 @@ func (*statefulSetSearcher) compare(a, b *v1.StatefulSet, orderBy string) bool { } } -func (s *statefulSetSearcher) search(namespace string, conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - statefulSets, err := s.statefulSetLister.StatefulSets(namespace).List(labels.Everything()) +func (s *statefulSetSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + statefulSets, err := informers.SharedInformerFactory().Apps().V1().StatefulSets().Lister().StatefulSets(namespace).List(labels.Everything()) if err != nil { return nil, err @@ -114,11 +113,11 @@ func (s *statefulSetSearcher) search(namespace string, conditions *conditions, o result := make([]*v1.StatefulSet, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = statefulSets } else { for _, item := range statefulSets { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/resources/storageclasses.go b/pkg/models/resources/storageclasses.go index 797652af2..513b52cf4 100644 --- a/pkg/models/resources/storageclasses.go +++ 
b/pkg/models/resources/storageclasses.go @@ -18,19 +18,19 @@ package resources import ( + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "sort" "strings" "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/labels" - lister "k8s.io/client-go/listers/storage/v1" ) type storageClassesSearcher struct { - storageClassesLister lister.StorageClassLister } -// exactly match +// exactly Match func (*storageClassesSearcher) match(match map[string]string, item *v1.StorageClass) bool { for k, v := range match { switch k { @@ -45,7 +45,7 @@ func (*storageClassesSearcher) match(match map[string]string, item *v1.StorageCl return true } -// fuzzy searchInNamespace +// Fuzzy searchInNamespace func (*storageClassesSearcher) fuzzy(fuzzy map[string]string, item *v1.StorageClass) bool { for k, v := range fuzzy { switch k { @@ -86,8 +86,8 @@ func (*storageClassesSearcher) compare(a, b *v1.StorageClass, orderBy string) bo } } -func (s *storageClassesSearcher) search(conditions *conditions, orderBy string, reverse bool) ([]interface{}, error) { - storageClasses, err := s.storageClassesLister.List(labels.Everything()) +func (s *storageClassesSearcher) search(conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + storageClasses, err := informers.SharedInformerFactory().Storage().V1().StorageClasses().Lister().List(labels.Everything()) if err != nil { return nil, err @@ -95,11 +95,11 @@ func (s *storageClassesSearcher) search(conditions *conditions, orderBy string, result := make([]*v1.StorageClass, 0) - if len(conditions.match) == 0 && len(conditions.fuzzy) == 0 { + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { result = storageClasses } else { for _, item := range storageClasses { - if s.match(conditions.match, item) && s.fuzzy(conditions.fuzzy, item) { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { result = append(result, item) } } diff --git a/pkg/models/revisions/revisions.go 
b/pkg/models/revisions/revisions.go index 2948096c5..3455ca492 100644 --- a/pkg/models/revisions/revisions.go +++ b/pkg/models/revisions/revisions.go @@ -24,29 +24,11 @@ import ( "github.com/golang/glog" "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/labels" - lister "k8s.io/client-go/listers/apps/v1" - - "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/informers" ) -var ( - daemonSetLister lister.DaemonSetLister - deploymentLister lister.DeploymentLister - replicaSetLister lister.ReplicaSetLister - statefulSetLister lister.StatefulSetLister - controllerRevisionLister lister.ControllerRevisionLister -) - -func init() { - daemonSetLister = informers.SharedInformerFactory().Apps().V1().DaemonSets().Lister() - deploymentLister = informers.SharedInformerFactory().Apps().V1().Deployments().Lister() - replicaSetLister = informers.SharedInformerFactory().Apps().V1().ReplicaSets().Lister() - statefulSetLister = informers.SharedInformerFactory().Apps().V1().StatefulSets().Lister() - controllerRevisionLister = informers.SharedInformerFactory().Apps().V1().ControllerRevisions().Lister() -} - func GetDeployRevision(namespace, name, revision string) (*v1.ReplicaSet, error) { + deploymentLister := informers.SharedInformerFactory().Apps().V1().Deployments().Lister() deploy, err := deploymentLister.Deployments(namespace).Get(name) if err != nil { glog.Errorf("get deployment %s failed, reason: %s", name, err) @@ -56,6 +38,7 @@ func GetDeployRevision(namespace, name, revision string) (*v1.ReplicaSet, error) labelMap := deploy.Spec.Template.Labels labelSelector := labels.Set(labelMap).AsSelector() + replicaSetLister := informers.SharedInformerFactory().Apps().V1().ReplicaSets().Lister() rsList, err := replicaSetLister.ReplicaSets(namespace).List(labelSelector) if err != nil { return nil, err @@ -67,11 +50,11 @@ func GetDeployRevision(namespace, name, revision string) (*v1.ReplicaSet, error) } } - return nil, errors.New(errors.NotFound, fmt.Sprintf("revision not 
found %v#%v", name, revision)) + return nil, fmt.Errorf("revision not found %v#%v", name, revision) } func GetDaemonSetRevision(namespace, name string, revisionInt int) (*v1.ControllerRevision, error) { - + daemonSetLister := informers.SharedInformerFactory().Apps().V1().DaemonSets().Lister() ds, err := daemonSetLister.DaemonSets(namespace).Get(name) if err != nil { @@ -84,7 +67,7 @@ func GetDaemonSetRevision(namespace, name string, revisionInt int) (*v1.Controll } func GetStatefulSetRevision(namespace, name string, revisionInt int) (*v1.ControllerRevision, error) { - + statefulSetLister := informers.SharedInformerFactory().Apps().V1().StatefulSets().Lister() st, err := statefulSetLister.StatefulSets(namespace).Get(name) if err != nil { @@ -97,7 +80,7 @@ func GetStatefulSetRevision(namespace, name string, revisionInt int) (*v1.Contro func getControllerRevision(namespace, name string, labelMap map[string]string, revision int) (*v1.ControllerRevision, error) { labelSelector := labels.Set(labelMap).AsSelector() - + controllerRevisionLister := informers.SharedInformerFactory().Apps().V1().ControllerRevisions().Lister() revisions, err := controllerRevisionLister.ControllerRevisions(namespace).List(labelSelector) if err != nil { @@ -110,5 +93,5 @@ func getControllerRevision(namespace, name string, labelMap map[string]string, r } } - return nil, errors.New(errors.NotFound, fmt.Sprintf("revision not found %v#%v", name, revision)) + return nil, fmt.Errorf("revision not found %v#%v", name, revision) } diff --git a/pkg/models/routers/routers.go b/pkg/models/routers/routers.go index 02fc13657..873228b9b 100644 --- a/pkg/models/routers/routers.go +++ b/pkg/models/routers/routers.go @@ -23,15 +23,12 @@ import ( "io/ioutil" "k8s.io/apimachinery/pkg/labels" - v12 "k8s.io/client-go/listers/core/v1" - - "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/informers" "github.com/golang/glog" - coreV1 "k8s.io/api/core/v1" - extensionsV1beta1 
"k8s.io/api/extensions/v1beta1" - metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/api/rbac/v1" @@ -43,18 +40,10 @@ import ( "kubesphere.io/kubesphere/pkg/models/iam" ) -var ( - serviceLister v12.ServiceLister -) - -func init() { - serviceLister = informers.SharedInformerFactory().Core().V1().Services().Lister() -} - -func GetAllRouters() ([]*coreV1.Service, error) { +func GetAllRouters() ([]*corev1.Service, error) { selector := labels.SelectorFromSet(labels.Set{"app": "kubesphere", "component": "ks-router", "tier": "backend"}) - + serviceLister := informers.SharedInformerFactory().Core().V1().Services().Lister() services, err := serviceLister.Services(constants.IngressControllerNamespace).List(selector) if err != nil { @@ -65,7 +54,7 @@ func GetAllRouters() ([]*coreV1.Service, error) { return services, nil } -func GetAllRoutersOfUser(username string) ([]*coreV1.Service, error) { +func GetAllRoutersOfUser(username string) ([]*corev1.Service, error) { allNamespace, namespaces, err := iam.GetUserNamespaces(username, v1.PolicyRule{ Verbs: []string{"get", "list"}, APIGroups: []string{""}, @@ -82,7 +71,7 @@ func GetAllRoutersOfUser(username string) ([]*coreV1.Service, error) { return GetAllRouters() } - routers := make([]*coreV1.Service, 0) + routers := make([]*corev1.Service, 0) for _, namespace := range namespaces { router, err := GetRouter(namespace) @@ -99,11 +88,11 @@ func GetAllRoutersOfUser(username string) ([]*coreV1.Service, error) { } // Get router from a namespace -func GetRouter(namespace string) (*coreV1.Service, error) { +func GetRouter(namespace string) (*corev1.Service, error) { serviceName := constants.IngressControllerPrefix + namespace selector := labels.SelectorFromSet(labels.Set{"app": "kubesphere", "component": "ks-router", "tier": "backend", "project": namespace}) - + 
serviceLister := informers.SharedInformerFactory().Core().V1().Services().Lister() services, err := serviceLister.Services(constants.IngressControllerNamespace).List(selector) if err != nil { @@ -116,7 +105,7 @@ func GetRouter(namespace string) (*coreV1.Service, error) { } } - return nil, errors.New(errors.NotFound, fmt.Sprintf("resources not found %s", serviceName)) + return nil, fmt.Errorf("resources not found %s", serviceName) } // Load all resource yamls @@ -148,11 +137,11 @@ func LoadYamls() ([]string, error) { } // Create a ingress controller in a namespace -func CreateRouter(namespace string, routerType coreV1.ServiceType, annotations map[string]string) (*coreV1.Service, error) { +func CreateRouter(namespace string, routerType corev1.ServiceType, annotations map[string]string) (*corev1.Service, error) { k8sClient := client.K8sClient() - var router *coreV1.Service + var router *corev1.Service yamls, err := LoadYamls() @@ -170,8 +159,8 @@ func CreateRouter(namespace string, routerType coreV1.ServiceType, annotations m } switch obj.(type) { - case *coreV1.Service: - service := obj.(*coreV1.Service) + case *corev1.Service: + service := obj.(*corev1.Service) service.SetAnnotations(annotations) service.Spec.Type = routerType @@ -190,8 +179,8 @@ func CreateRouter(namespace string, routerType coreV1.ServiceType, annotations m router = service - case *extensionsV1beta1.Deployment: - deployment := obj.(*extensionsV1beta1.Deployment) + case *extensionsv1beta1.Deployment: + deployment := obj.(*extensionsv1beta1.Deployment) deployment.Name = constants.IngressControllerPrefix + namespace // Add project label @@ -204,7 +193,7 @@ func CreateRouter(namespace string, routerType coreV1.ServiceType, annotations m // Choose self as master deployment.Spec.Template.Spec.Containers[0].Args = append(deployment.Spec.Template.Spec.Containers[0].Args, "--election-id="+deployment.Name) - if routerType == coreV1.ServiceTypeLoadBalancer { + if routerType == corev1.ServiceTypeLoadBalancer 
{ deployment.Spec.Template.Spec.Containers[0].Args = append(deployment.Spec.Template.Spec.Containers[0].Args, "--publish-service="+constants.IngressControllerNamespace+"/"+constants.IngressControllerPrefix+namespace) } else { deployment.Spec.Template.Spec.Containers[0].Args = append(deployment.Spec.Template.Spec.Containers[0].Args, "--report-node-internal-ip-address") @@ -224,11 +213,11 @@ func CreateRouter(namespace string, routerType coreV1.ServiceType, annotations m // DeleteRouter is used to delete ingress controller related resources in namespace // It will not delete ClusterRole resource cause it maybe used by other controllers -func DeleteRouter(namespace string) (*coreV1.Service, error) { +func DeleteRouter(namespace string) (*corev1.Service, error) { k8sClient := client.K8sClient() var err error - var router *coreV1.Service + var router *corev1.Service if err != nil { glog.Error(err) @@ -236,9 +225,9 @@ func DeleteRouter(namespace string) (*coreV1.Service, error) { // delete controller service serviceName := constants.IngressControllerPrefix + namespace - deleteOptions := metaV1.DeleteOptions{} + deleteOptions := meta_v1.DeleteOptions{} - listOptions := metaV1.ListOptions{ + listOptions := meta_v1.ListOptions{ LabelSelector: "app=kubesphere,component=ks-router,tier=backend,project=" + namespace, FieldSelector: "metadata.name=" + serviceName} @@ -259,7 +248,7 @@ func DeleteRouter(namespace string) (*coreV1.Service, error) { // delete controller deployment deploymentName := constants.IngressControllerPrefix + namespace - listOptions = metaV1.ListOptions{ + listOptions = meta_v1.ListOptions{ LabelSelector: "app=kubesphere,component=ks-router,tier=backend,project=" + namespace, } deployments, err := k8sClient.ExtensionsV1beta1().Deployments(constants.IngressControllerNamespace).List(listOptions) @@ -279,10 +268,10 @@ func DeleteRouter(namespace string) (*coreV1.Service, error) { } // Update Ingress Controller Service, change type from NodePort to Loadbalancer 
or vice versa. -func UpdateRouter(namespace string, routerType coreV1.ServiceType, annotations map[string]string) (*coreV1.Service, error) { +func UpdateRouter(namespace string, routerType corev1.ServiceType, annotations map[string]string) (*corev1.Service, error) { k8sClient := client.K8sClient() - var router *coreV1.Service + var router *corev1.Service router, err := GetRouter(namespace) @@ -293,7 +282,7 @@ func UpdateRouter(namespace string, routerType coreV1.ServiceType, annotations m if router == nil { glog.Error("Trying to update a non-existed router") - return nil, errors.New(errors.Internal, "router not created yet") + return nil, fmt.Errorf("router not created yet") } // from LoadBalancer to NodePort, or vice-versa diff --git a/pkg/models/status/status.go b/pkg/models/status/status.go index d2673c16c..28c1896c1 100644 --- a/pkg/models/status/status.go +++ b/pkg/models/status/status.go @@ -18,7 +18,8 @@ package status import ( - "fmt" + "kubesphere.io/kubesphere/pkg/models" + "kubesphere.io/kubesphere/pkg/params" "kubesphere.io/kubesphere/pkg/models/resources" ) @@ -31,7 +32,7 @@ type workLoadStatus struct { func GetNamespacesResourceStatus(namespace string) (*workLoadStatus, error) { res := workLoadStatus{Count: make(map[string]int), Namespace: namespace, Items: make(map[string]interface{})} - var notReadyList *resources.ResourceList + var notReadyList *models.PageableResponse var err error for _, resource := range []string{resources.Deployments, resources.StatefulSets, resources.DaemonSets, resources.PersistentVolumeClaims} { notReadyStatus := "updating" @@ -39,7 +40,7 @@ func GetNamespacesResourceStatus(namespace string) (*workLoadStatus, error) { notReadyStatus = "pending" } - notReadyList, err = resources.ListNamespaceResource(namespace, resource, fmt.Sprintf("status=%s", notReadyStatus), "", false, -1, 0) + notReadyList, err = resources.ListNamespaceResource(namespace, resource, ¶ms.Conditions{Match: map[string]string{"status": notReadyStatus}}, "", 
false, -1, 0) if err != nil { return nil, err diff --git a/pkg/models/storage/storage.go b/pkg/models/storage/storage.go index 2a1342053..4c784defb 100644 --- a/pkg/models/storage/storage.go +++ b/pkg/models/storage/storage.go @@ -24,9 +24,6 @@ import ( storageV1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" - lister "k8s.io/client-go/listers/core/v1" - lister2 "k8s.io/client-go/listers/storage/v1" - "kubesphere.io/kubesphere/pkg/informers" ) @@ -36,18 +33,12 @@ type ScMetrics struct { PvcNumber string `json:"pvcNumber"` } -var ( - persistentVolumeClaimLister lister.PersistentVolumeClaimLister - persistentVolumeLister lister.PersistentVolumeLister - sotrageClassesLister lister2.StorageClassLister -) - func init() { - persistentVolumeClaimLister = informers.SharedInformerFactory().Core().V1().PersistentVolumeClaims().Lister() - persistentVolumeLister = informers.SharedInformerFactory().Core().V1().PersistentVolumes().Lister() + } func GetPvcListBySc(scName string) ([]*v1.PersistentVolumeClaim, error) { + persistentVolumeClaimLister := informers.SharedInformerFactory().Core().V1().PersistentVolumeClaims().Lister() all, err := persistentVolumeClaimLister.List(labels.Everything()) if err != nil { @@ -70,6 +61,7 @@ func GetPvcListBySc(scName string) ([]*v1.PersistentVolumeClaim, error) { // Get info of metrics func GetScMetrics(scName string) (*ScMetrics, error) { + persistentVolumeLister := informers.SharedInformerFactory().Core().V1().PersistentVolumes().Lister() pvList, err := persistentVolumeLister.List(labels.Everything()) if err != nil { return nil, err @@ -102,7 +94,7 @@ func GetScMetrics(scName string) (*ScMetrics, error) { func GetScList() ([]*storageV1.StorageClass, error) { // Get StorageClass list - scList, err := sotrageClassesLister.List(labels.Everything()) + scList, err := informers.SharedInformerFactory().Storage().V1().StorageClasses().Lister().List(labels.Everything()) if err != nil { return nil, err 
diff --git a/pkg/models/storage/volumes.go b/pkg/models/storage/volumes.go index b3072f062..a656ad895 100644 --- a/pkg/models/storage/volumes.go +++ b/pkg/models/storage/volumes.go @@ -20,19 +20,12 @@ package storage import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - v12 "k8s.io/client-go/listers/core/v1" - "kubesphere.io/kubesphere/pkg/informers" ) -var podLister v12.PodLister - -func init() { - podLister = informers.SharedInformerFactory().Core().V1().Pods().Lister() -} - // List pods of a specific persistent volume claims func GetPodListByPvc(pvc string, ns string) (res []*v1.Pod, err error) { + podLister := informers.SharedInformerFactory().Core().V1().Pods().Lister() podList, err := podLister.Pods(ns).List(labels.Everything()) if err != nil { return nil, err diff --git a/pkg/models/types.go b/pkg/models/types.go index 3fb55a144..160be8b30 100644 --- a/pkg/models/types.go +++ b/pkg/models/types.go @@ -17,11 +17,121 @@ */ package models -type MessageResponse struct { - Message string `json:"message"` -} +import ( + v12 "k8s.io/api/core/v1" + "time" + + "k8s.io/api/rbac/v1" +) type PageableResponse struct { Items []interface{} `json:"items"` TotalCount int `json:"total_count"` } + +type Workspace struct { + Group `json:",inline"` + Admin string `json:"admin,omitempty"` + Namespaces []string `json:"namespaces"` + DevopsProjects []string `json:"devops_projects"` +} + +type UserInvite struct { + Username string `json:"username"` + Role string `json:"role"` +} + +func (g Group) GetCreateTime() (time.Time, error) { + return time.Parse("2006-01-02T15:04:05Z", g.CreateTime) +} + +type WorkspaceDPBinding struct { + Workspace string `gorm:"primary_key"` + DevOpsProject string `gorm:"primary_key"` +} + +type DevopsProject struct { + ProjectId *string `json:"project_id,omitempty"` + Name string `json:"name"` + Description string `json:"description"` + Creator string `json:"creator"` + CreateTime *time.Time `json:"create_time,omitempty"` + Status *string 
`json:"status"` + Visibility *string `json:"visibility,omitempty"` +} + +type Action struct { + Name string `json:"name"` + Rules []v1.PolicyRule `json:"rules"` +} + +type Rule struct { + Name string `json:"name"` + Actions []Action `json:"actions"` +} + +type SimpleRule struct { + Name string `json:"name"` + Actions []string `json:"actions"` +} + +type User struct { + Username string `json:"username"` + //UID string `json:"uid"` + Groups []string `json:"groups,omitempty"` + Password string `json:"password,omitempty"` + CurrentPassword string `json:"current_password,omitempty"` + //Extra map[string]interface{} `json:"extra"` + AvatarUrl string `json:"avatar_url"` + Description string `json:"description"` + Email string `json:"email"` + LastLoginTime string `json:"last_login_time"` + Status int `json:"status"` + ClusterRole string `json:"cluster_role"` + ClusterRules []SimpleRule `json:"cluster_rules"` + Roles map[string]string `json:"roles,omitempty"` + Rules map[string][]SimpleRule `json:"rules,omitempty"` + Role string `json:"role,omitempty"` + RoleBinding string `json:"role_binding,omitempty"` + Lang string `json:"lang,omitempty"` + WorkspaceRoles map[string]string `json:"workspace_roles,omitempty"` + WorkspaceRole string `json:"workspace_role,omitempty"` + WorkspaceRules map[string][]SimpleRule `json:"workspace_rules,omitempty"` +} + +type Group struct { + Path string `json:"path"` + Name string `json:"name"` + Gid string `json:"gid"` + Members []string `json:"members"` + Logo string `json:"logo"` + Creator string `json:"creator"` + CreateTime string `json:"create_time"` + ChildGroups []string `json:"child_groups"` + Description string `json:"description"` +} + +type Component struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + SelfLink string `json:"selfLink"` + Label interface{} `json:"label"` + StartedAt time.Time `json:"startedAt"` + TotalBackends int `json:"totalBackends"` + HealthyBackends int `json:"healthyBackends"` +} + +type 
PodInfo struct { + Namespace string `json:"namespace"` + Pod string `json:"pod"` + Container string `json:"container"` +} + +type Token struct { + Token string `json:"access_token"` +} + +type ResourceQuota struct { + Namespace string `json:"namespace"` + Data v12.ResourceQuotaStatus `json:"data"` +} diff --git a/pkg/models/workspaces/types.go b/pkg/models/workspaces/types.go deleted file mode 100644 index 8db78b922..000000000 --- a/pkg/models/workspaces/types.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- -*/ -package workspaces - -import "time" - -type Workspace struct { - Group `json:",inline"` - Admin string `json:"admin,omitempty"` - Namespaces []string `json:"namespaces"` - DevopsProjects []string `json:"devops_projects"` -} - -type UserInvite struct { - Username string `json:"username"` - Role string `json:"role"` -} - -type Group struct { - Path string `json:"path"` - Name string `json:"name"` - Gid string `json:"gid"` - Members []string `json:"members"` - Logo string `json:"logo"` - Creator string `json:"creator"` - CreateTime string `json:"create_time"` - ChildGroups []string `json:"child_groups,omitempty"` - Description string `json:"description"` -} - -func (g Group) GetCreateTime() (time.Time, error) { - return time.Parse("2006-01-02T15:04:05Z", g.CreateTime) -} - -type WorkspaceDPBinding struct { - Workspace string `gorm:"primary_key"` - DevOpsProject string `gorm:"primary_key"` -} - -type DevopsProject struct { - ProjectId *string `json:"project_id,omitempty"` - Name string `json:"name"` - Description string `json:"description"` - Creator string `json:"creator"` - CreateTime *time.Time `json:"create_time,omitempty"` - Status *string `json:"status"` - Visibility *string `json:"visibility,omitempty"` -} diff --git a/pkg/models/workspaces/workspaces.go b/pkg/models/workspaces/workspaces.go index 7bb2778a7..1f604598c 100644 --- a/pkg/models/workspaces/workspaces.go +++ b/pkg/models/workspaces/workspaces.go @@ -24,10 +24,9 @@ import ( "io/ioutil" "net/http" - lister "k8s.io/client-go/listers/core/v1" - "kubesphere.io/kubesphere/pkg/constants" "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/models" "kubesphere.io/kubesphere/pkg/models/iam" "log" @@ -50,26 +49,14 @@ import ( "sort" - lister2 "k8s.io/client-go/listers/rbac/v1" - "kubesphere.io/kubesphere/pkg/client" - ksErr "kubesphere.io/kubesphere/pkg/errors" + kserr "kubesphere.io/kubesphere/pkg/errors" ) -var ( - namespaceLister lister.NamespaceLister - clusterRoleLister 
lister2.ClusterRoleLister -) - -func init() { - namespaceLister = informers.SharedInformerFactory().Core().V1().Namespaces().Lister() - clusterRoleLister = informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() -} - func UnBindDevopsProject(workspace string, devops string) error { - db := client.NewSharedDBClient() + db := client.DBClient() defer db.Close() - return db.Delete(&WorkspaceDPBinding{Workspace: workspace, DevOpsProject: devops}).Error + return db.Delete(&models.WorkspaceDPBinding{Workspace: workspace, DevOpsProject: devops}).Error } func DeleteDevopsProject(username string, devops string) error { @@ -87,12 +74,12 @@ func DeleteDevopsProject(username string, devops string) error { return err } if result.StatusCode > 200 { - return ksErr.Wrap(data) + return kserr.Parse(data) } return nil } -func CreateDevopsProject(username string, workspace string, devops DevopsProject) (*DevopsProject, error) { +func CreateDevopsProject(username string, workspace string, devops models.DevopsProject) (*models.DevopsProject, error) { data, err := json.Marshal(devops) @@ -117,10 +104,10 @@ func CreateDevopsProject(username string, workspace string, devops DevopsProject } if result.StatusCode > 200 { - return nil, ksErr.Wrap(data) + return nil, kserr.Parse(data) } - var project DevopsProject + var project models.DevopsProject err = json.Unmarshal(data, &project) @@ -140,7 +127,7 @@ func CreateDevopsProject(username string, workspace string, devops DevopsProject return &project, nil } -func createDefaultDevopsRoleBinding(workspace string, project DevopsProject) error { +func createDefaultDevopsRoleBinding(workspace string, project models.DevopsProject) error { admins, err := iam.GetWorkspaceUsers(workspace, constants.WorkspaceAdmin) if err != nil { @@ -296,7 +283,7 @@ func ListNamespaceByUser(workspaceName string, username string, keyword string, } func Namespaces(workspaceName string) ([]*core.Namespace, error) { - + namespaceLister := 
informers.SharedInformerFactory().Core().V1().Namespaces().Lister() namespaces, err := namespaceLister.List(labels.SelectorFromSet(labels.Set{"kubesphere.io/workspace": workspaceName})) if err != nil { @@ -317,11 +304,9 @@ func Namespaces(workspaceName string) ([]*core.Namespace, error) { } func BindingDevopsProject(workspace string, devops string) error { - //db := client.NewSharedDBClient() - //defer db.Close() - //return db.Create(&WorkspaceDPBinding{Workspace: workspace, DevOpsProject: devops}).Error - // TODO FIX - return nil + db := client.DBClient() + defer db.Close() + return db.Create(&models.WorkspaceDPBinding{Workspace: workspace, DevOpsProject: devops}).Error } func DeleteNamespace(workspace string, namespaceName string) error { @@ -338,7 +323,7 @@ func DeleteNamespace(workspace string, namespaceName string) error { } -func Delete(workspace *Workspace) error { +func Delete(workspace *models.Workspace) error { err := release(workspace) @@ -365,13 +350,13 @@ func Delete(workspace *Workspace) error { } if result.StatusCode > 200 { - return ksErr.Wrap(data) + return kserr.Parse(data) } return nil } -func release(workspace *Workspace) error { +func release(workspace *models.Workspace) error { for _, namespace := range workspace.Namespaces { err := DeleteNamespace(workspace.Name, namespace) if err != nil && !apierrors.IsNotFound(err) { @@ -413,7 +398,7 @@ func workspaceRoleRelease(workspace string) error { return nil } -func Create(workspace *Workspace) (*Workspace, error) { +func Create(workspace *models.Workspace) (*models.Workspace, error) { data, err := json.Marshal(workspace) @@ -434,10 +419,10 @@ func Create(workspace *Workspace) (*Workspace, error) { } if result.StatusCode > 200 { - return nil, ksErr.Wrap(data) + return nil, kserr.Parse(data) } - var created Workspace + var created models.Workspace err = json.Unmarshal(data, &created) @@ -458,7 +443,7 @@ func Create(workspace *Workspace) (*Workspace, error) { return &created, nil } -func Edit(workspace 
*Workspace) (*Workspace, error) { +func Edit(workspace *models.Workspace) (*models.Workspace, error) { data, err := json.Marshal(workspace) @@ -487,10 +472,10 @@ func Edit(workspace *Workspace) (*Workspace, error) { } if result.StatusCode > 200 { - return nil, ksErr.Wrap(data) + return nil, kserr.Parse(data) } - var edited Workspace + var edited models.Workspace err = json.Unmarshal(data, &edited) @@ -501,7 +486,7 @@ func Edit(workspace *Workspace) (*Workspace, error) { return &edited, nil } -func Detail(name string) (*Workspace, error) { +func Detail(name string) (*models.Workspace, error) { result, err := http.Get(fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/groups/%s", constants.AccountAPIServer, name)) @@ -517,10 +502,10 @@ func Detail(name string) (*Workspace, error) { } if result.StatusCode > 200 { - return nil, ksErr.Wrap(data) + return nil, kserr.Parse(data) } - var group Group + var group models.Group err = json.Unmarshal(data, &group) @@ -528,7 +513,7 @@ func Detail(name string) (*Workspace, error) { return nil, err } - db := client.NewSharedDBClient() + db := client.DBClient() defer db.Close() workspace, err := convertGroupToWorkspace(db, group) @@ -541,7 +526,7 @@ func Detail(name string) (*Workspace, error) { } // List all workspaces for the current user -func ListWorkspaceByUser(username string, keyword string) ([]*Workspace, error) { +func ListWorkspaceByUser(username string, keyword string) ([]*models.Workspace, error) { clusterRoles, err := iam.GetClusterRoles(username) if err != nil { @@ -556,7 +541,7 @@ func ListWorkspaceByUser(username string, keyword string) ([]*Workspace, error) workspacesManager := v1.PolicyRule{APIGroups: []string{"kubesphere.io"}, Verbs: []string{"list", "get"}, Resources: []string{"workspaces"}} - var workspaces []*Workspace + var workspaces []*models.Workspace if iam.RulesMatchesRequired(rules, workspacesManager) { workspaces, err = fetch(nil) } else { @@ -582,13 +567,13 @@ func ListWorkspaceByUser(username 
string, keyword string) ([]*Workspace, error) return workspaces, err } -func fetch(names []string) ([]*Workspace, error) { +func fetch(names []string) ([]*models.Workspace, error) { url := fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/groups", constants.AccountAPIServer) if names != nil { if len(names) == 0 { - return make([]*Workspace, 0), nil + return make([]*models.Workspace, 0), nil } else { url = url + "?path=" + strings.Join(names, ",") } @@ -608,10 +593,10 @@ func fetch(names []string) ([]*Workspace, error) { } if result.StatusCode > 200 { - return nil, ksErr.Wrap(data) + return nil, kserr.Parse(data) } - var groups []Group + var groups []models.Group err = json.Unmarshal(data, &groups) @@ -619,11 +604,11 @@ func fetch(names []string) ([]*Workspace, error) { return nil, err } - db := client.NewSharedDBClient() + db := client.DBClient() defer db.Close() - workspaces := make([]*Workspace, 0) + workspaces := make([]*models.Workspace, 0) for _, group := range groups { workspace, err := convertGroupToWorkspace(db, group) if err != nil { @@ -635,21 +620,21 @@ func fetch(names []string) ([]*Workspace, error) { return workspaces, nil } -func ListDevopsProjectsByUser(username string, workspace string, keyword string, orderBy string, reverse bool, limit int, offset int) (int, []DevopsProject, error) { +func ListDevopsProjectsByUser(username string, workspace string, keyword string, orderBy string, reverse bool, limit int, offset int) (int, []models.DevopsProject, error) { - db := client.NewSharedDBClient() + db := client.DBClient() defer db.Close() - var workspaceDOPBindings []WorkspaceDPBinding + var workspaceDOPBindings []models.WorkspaceDPBinding if err := db.Where("workspace = ?", workspace).Find(&workspaceDOPBindings).Error; err != nil { return 0, nil, err } - devOpsProjects := make([]DevopsProject, 0) + devOpsProjects := make([]models.DevopsProject, 0) request, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/api/v1alpha/projects", 
constants.DevopsAPIServer), nil) - request.Header.Add("X-Token-Username", username) + request.Header.Add(constants.UserNameHeader, username) result, err := http.DefaultClient.Do(request) if err != nil { @@ -662,15 +647,8 @@ func ListDevopsProjectsByUser(username string, workspace string, keyword string, return 0, nil, err } - //if result.StatusCode == 403 || result.StatusCode == 404 { - // if err := db.Delete(&workspaceDOPBinding).Error; err != nil { - // return nil, err - // } - // continue - //} - if result.StatusCode > 200 { - return 0, nil, ksErr.Wrap(data) + return 0, nil, kserr.Parse(data) } err = json.Unmarshal(data, &devOpsProjects) @@ -720,14 +698,14 @@ func ListDevopsProjectsByUser(username string, workspace string, keyword string, } if len(devOpsProjects) < offset { - return len(devOpsProjects), make([]DevopsProject, 0), nil + return len(devOpsProjects), make([]models.DevopsProject, 0), nil } else if len(devOpsProjects) < limit+offset { return len(devOpsProjects), devOpsProjects[offset:], nil } else { return len(devOpsProjects), devOpsProjects[offset : limit+offset], nil } } -func convertGroupToWorkspace(db *gorm.DB, group Group) (*Workspace, error) { +func convertGroupToWorkspace(db *gorm.DB, group models.Group) (*models.Workspace, error) { namespaces, err := Namespaces(group.Name) if err != nil { @@ -740,7 +718,7 @@ func convertGroupToWorkspace(db *gorm.DB, group Group) (*Workspace, error) { namespacesNames = append(namespacesNames, namespace.Name) } - var workspaceDOPBindings []WorkspaceDPBinding + var workspaceDOPBindings []models.WorkspaceDPBinding if err := db.Where("workspace = ?", group.Name).Find(&workspaceDOPBindings).Error; err != nil { return nil, err @@ -752,7 +730,7 @@ func convertGroupToWorkspace(db *gorm.DB, group Group) (*Workspace, error) { devOpsProjects = append(devOpsProjects, workspaceDOPBinding.DevOpsProject) } - workspace := Workspace{Group: group} + workspace := models.Workspace{Group: group} workspace.Namespaces = 
namespacesNames workspace.DevopsProjects = devOpsProjects return &workspace, nil @@ -769,7 +747,7 @@ func CreateNamespace(namespace *core.Namespace) (*core.Namespace, error) { return ns, nil } -func Invite(workspaceName string, users []UserInvite) error { +func Invite(workspaceName string, users []models.UserInvite) error { for _, user := range users { if !slice.ContainsString(constants.WorkSpaceRoles, user.Role, nil) { return fmt.Errorf("role %s not exist", user.Role) @@ -848,9 +826,9 @@ func RemoveMembers(workspaceName string, users []string) error { return nil } -func Roles(workspace *Workspace) ([]*v1.ClusterRole, error) { +func Roles(workspace *models.Workspace) ([]*v1.ClusterRole, error) { roles := make([]*v1.ClusterRole, 0) - + clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() for _, name := range constants.WorkSpaceRoles { clusterRole, err := clusterRoleLister.Get(fmt.Sprintf("system:%s:%s", workspace.Name, name)) @@ -871,7 +849,7 @@ func Roles(workspace *Workspace) ([]*v1.ClusterRole, error) { return roles, nil } -func GetWorkspaceMembers(workspace string, keyword string) ([]iam.User, error) { +func GetWorkspaceMembers(workspace string, keyword string) ([]models.User, error) { url := fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/workspaces/%s/members", constants.AccountAPIServer, workspace) @@ -893,10 +871,10 @@ func GetWorkspaceMembers(workspace string, keyword string) ([]iam.User, error) { } if result.StatusCode > 200 { - return nil, ksErr.Wrap(data) + return nil, kserr.Parse(data) } - var users []iam.User + var users []models.User err = json.Unmarshal(data, &users) @@ -908,7 +886,7 @@ func GetWorkspaceMembers(workspace string, keyword string) ([]iam.User, error) { } -func WorkspaceRoleInit(workspace *Workspace) error { +func WorkspaceRoleInit(workspace *models.Workspace) error { k8sClient := client.K8sClient() admin := new(v1.ClusterRole) @@ -1169,7 +1147,7 @@ func unbindNamespacesRole(namespaces 
[]string, users []string) error { return nil } -func UnbindWorkspace(workspace *Workspace, users []string) error { +func UnbindWorkspace(workspace *models.Workspace, users []string) error { err := unbindNamespacesRole(workspace.Namespaces, users) @@ -1186,7 +1164,7 @@ func UnbindWorkspace(workspace *Workspace, users []string) error { return nil } -func CreateWorkspaceRoleBinding(workspace *Workspace, username string, role string) error { +func CreateWorkspaceRoleBinding(workspace *models.Workspace, username string, role string) error { k8sClient := client.K8sClient() @@ -1242,10 +1220,10 @@ func CreateWorkspaceRoleBinding(workspace *Workspace, username string, role stri func GetDevOpsProjects(workspaceName string) ([]string, error) { - db := client.NewSharedDBClient() + db := client.DBClient() defer db.Close() - var workspaceDOPBindings []WorkspaceDPBinding + var workspaceDOPBindings []models.WorkspaceDPBinding if err := db.Where("workspace = ?", workspaceName).Find(&workspaceDOPBindings).Error; err != nil { return nil, err @@ -1304,7 +1282,7 @@ func Count() (int, error) { } if result.StatusCode > 200 { - return 0, ksErr.Wrap(data) + return 0, kserr.Parse(data) } var count map[string]json.Number @@ -1319,13 +1297,14 @@ func Count() (int, error) { v, err := value.Int64() if err != nil { - return 0, ksErr.New(ksErr.Internal, err.Error()) + return 0, err } return int(v), nil } func GetAllProjectNums() (int, error) { + namespaceLister := informers.SharedInformerFactory().Core().V1().Namespaces().Lister() list, err := namespaceLister.List(labels.Everything()) if err != nil { return 0, err @@ -1334,11 +1313,11 @@ func GetAllProjectNums() (int, error) { } func GetAllDevOpsProjectsNums() (int, error) { - db := client.NewSharedDBClient() + db := client.DBClient() defer db.Close() var count int - if err := db.Model(&WorkspaceDPBinding{}).Count(&count).Error; err != nil { + if err := db.Model(&models.WorkspaceDPBinding{}).Count(&count).Error; err != nil { return 0, err } 
return count, nil @@ -1357,7 +1336,7 @@ func GetAllAccountNums() (int, error) { return 0, err } if result.StatusCode > 200 { - return 0, ksErr.Wrap(data) + return 0, kserr.Parse(data) } var count map[string]json.Number diff --git a/pkg/monitoring/v1alpha2/monitoring/monitoring.go b/pkg/monitoring/v1alpha2/monitoring/monitoring.go deleted file mode 100644 index bd2d6f286..000000000 --- a/pkg/monitoring/v1alpha2/monitoring/monitoring.go +++ /dev/null @@ -1,397 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -*/ -package monitoring - -import ( - "github.com/emicklei/go-restful" - "github.com/emicklei/go-restful-openapi" - - "kubesphere.io/kubesphere/pkg/client" - "kubesphere.io/kubesphere/pkg/models/metrics" -) - -func Route(ws *restful.WebService) { - u := Monitor{} - - ws.Route(ws.GET("/clusters").To(u.monitorCluster). - Doc("monitor cluster level metrics"). - Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("cluster_cpu_utilisation")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "cluster"})) - - ws.Route(ws.GET("/nodes").To(u.monitorNode). - Doc("monitor nodes level metrics"). - Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("node_cpu_utilisation")). 
- Param(ws.QueryParameter("nodes_filter", "node re2 expression filter").DataType("string").Required(false).DefaultValue("")). - Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)). - Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)). - Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")). - Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "node"})) - - ws.Route(ws.GET("/nodes/{node_id}").To(u.monitorNode). - Doc("monitor specific node level metrics"). - Param(ws.PathParameter("node_id", "specific node").DataType("string").Required(true).DefaultValue("")). - Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").DataType("string").Required(true).DefaultValue("node_cpu_utilisation")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "node"})) - - ws.Route(ws.GET("/namespaces").To(u.monitorNamespace). - Doc("monitor namespaces level metrics"). - Param(ws.QueryParameter("namespaces_filter", "namespaces re2 expression filter").DataType("string").Required(false).DefaultValue("")). - Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("namespace_memory_utilisation")). - Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)). - Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)). - Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")). - Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")). 
- Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "namespace"})) - - ws.Route(ws.GET("/namespaces/{ns_name}").To(u.monitorNamespace). - Doc("monitor specific namespace level metrics"). - Param(ws.PathParameter("ns_name", "specific namespace").DataType("string").Required(true).DefaultValue("monitoring")). - Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").DataType("string").Required(true).DefaultValue("namespace_memory_utilisation")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "namespace"})) - - ws.Route(ws.GET("/namespaces/{ns_name}/pods").To(u.monitorPod). - Doc("monitor pods level metrics"). - Param(ws.PathParameter("ns_name", "specific namespace").DataType("string").Required(true).DefaultValue("monitoring")). - Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("pod_memory_utilisation_wo_cache")). - Param(ws.QueryParameter("pods_filter", "pod re2 expression filter").DataType("string").Required(false).DefaultValue("")). - Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)). - Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)). - Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")). - Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "pod"})) - - ws.Route(ws.GET("/namespaces/{ns_name}/pods/{pod_name}").To(u.monitorPod). - Doc("monitor specific pod level metrics"). - Param(ws.PathParameter("ns_name", "specific namespace").DataType("string").Required(true).DefaultValue("monitoring")). - Param(ws.PathParameter("pod_name", "specific pod").DataType("string").Required(true).DefaultValue("")). 
- Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").DataType("string").Required(true).DefaultValue("pod_memory_utilisation_wo_cache")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "pod"})) - - ws.Route(ws.GET("/nodes/{node_id}/pods").To(u.monitorPod). - Doc("monitor pods level metrics by nodeid"). - Param(ws.PathParameter("node_id", "specific node").DataType("string").Required(true).DefaultValue("i-k89a62il")). - Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("pod_memory_utilisation_wo_cache")). - Param(ws.QueryParameter("pods_filter", "pod re2 expression filter").DataType("string").Required(false).DefaultValue("openpitrix.*")). - Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)). - Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)). - Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")). - Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "pod"})) - - ws.Route(ws.GET("/nodes/{node_id}/pods/{pod_name}").To(u.monitorPod). - Doc("monitor specific pod level metrics by nodeid"). - Param(ws.PathParameter("node_id", "specific node").DataType("string").Required(true).DefaultValue("i-k89a62il")). - Param(ws.PathParameter("pod_name", "specific pod").DataType("string").Required(true).DefaultValue("")). - Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").DataType("string").Required(true).DefaultValue("pod_memory_utilisation_wo_cache")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "pod"})) - - ws.Route(ws.GET("/nodes/{node_id}/pods/{pod_name}/containers").To(u.monitorContainer). - Doc("monitor specific pod level metrics by nodeid"). 
- Param(ws.PathParameter("node_id", "specific node").DataType("string").Required(true)). - Param(ws.PathParameter("pod_name", "specific pod").DataType("string").Required(true)). - Param(ws.QueryParameter("containers_filter", "container re2 expression filter").DataType("string").Required(false).DefaultValue("")). - Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...").DataType("string").Required(false)). - Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").DataType("string").Required(true).DefaultValue("pod_memory_utilisation_wo_cache")). - Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)). - Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)). - Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")). - Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")). - Param(ws.QueryParameter("type", "rank, statistic").DataType("string").Required(false).DefaultValue("rank")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "container"})) - - ws.Route(ws.GET("/namespaces/{ns_name}/pods/{pod_name}/containers").To(u.monitorContainer). - Doc("monitor containers level metrics"). - Param(ws.PathParameter("ns_name", "specific namespace").DataType("string").Required(true).DefaultValue("monitoring")). - Param(ws.PathParameter("pod_name", "specific pod").DataType("string").Required(true).DefaultValue("")). - Param(ws.QueryParameter("containers_filter", "container re2 expression filter").DataType("string").Required(false).DefaultValue("")). - Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...").DataType("string").Required(false)). - Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").DataType("string").Required(true).DefaultValue("container_memory_utilisation_wo_cache")). 
- Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)). - Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)). - Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")). - Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")). - Param(ws.QueryParameter("type", "rank, statistic").DataType("string").Required(false).DefaultValue("rank")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "container"})). - Consumes(restful.MIME_JSON, restful.MIME_XML). - Produces(restful.MIME_JSON) - - ws.Route(ws.GET("/namespaces/{ns_name}/pods/{pod_name}/containers/{container_name}").To(u.monitorContainer). - Doc("monitor specific container level metrics"). - Param(ws.PathParameter("ns_name", "specific namespace").DataType("string").Required(true).DefaultValue("monitoring")). - Param(ws.PathParameter("pod_name", "specific pod").DataType("string").Required(true).DefaultValue("")). - Param(ws.PathParameter("container_name", "specific container").DataType("string").Required(true).DefaultValue("")). - Param(ws.QueryParameter("metrics_name", "metrics name cpu memory...").DataType("string").Required(true).DefaultValue("container_memory_utilisation_wo_cache")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "container"})). - Consumes(restful.MIME_JSON, restful.MIME_XML). - Produces(restful.MIME_JSON) - - ws.Route(ws.GET("/namespaces/{ns_name}/workloads/{workload_kind}").To(u.monitorWorkload). - Doc("monitor specific workload level metrics"). - Param(ws.PathParameter("ns_name", "namespace").DataType("string").Required(true).DefaultValue("kube-system")). - Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...").DataType("string").Required(false)). 
- Param(ws.PathParameter("workload_kind", "workload kind").DataType("string").Required(false).DefaultValue("daemonset")). - Param(ws.QueryParameter("workload_name", "workload name").DataType("string").Required(true).DefaultValue("")). - Param(ws.QueryParameter("pods_filter", "pod re2 expression filter").DataType("string").Required(false).DefaultValue("openpitrix.*")). - Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)). - Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)). - Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")). - Param(ws.QueryParameter("limit", "max metric items in a page").DataType("string").Required(false).DefaultValue("4")). - Param(ws.QueryParameter("type", "rank, statistic").DataType("string").Required(false).DefaultValue("rank")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "workload"})) - - ws.Route(ws.GET("/namespaces/{ns_name}/workloads").To(u.monitorWorkload). - Doc("monitor all workload level metrics"). - Param(ws.PathParameter("ns_name", "namespace").DataType("string").Required(true).DefaultValue("kube-system")). - Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...").DataType("string").Required(false)). - Param(ws.QueryParameter("workloads_filter", "pod re2 expression filter").DataType("string").Required(false).DefaultValue("")). - Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)). - Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)). - Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")). - Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")). 
- Param(ws.QueryParameter("type", "rank, statistic").DataType("string").Required(false).DefaultValue("rank")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "workload"})) - - // list all namespace in this workspace by selected metrics - ws.Route(ws.GET("/workspaces/{workspace_name}").To(u.monitorOneWorkspace). - Doc("monitor workspaces level metrics"). - Param(ws.PathParameter("workspace_name", "workspace name").DataType("string").Required(true)). - Param(ws.QueryParameter("namespaces_filter", "namespaces filter").DataType("string").Required(false).DefaultValue("k.*")). - Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("namespace_memory_utilisation_wo_cache")). - Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)). - Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)). - Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")). - Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")). - Param(ws.QueryParameter("type", "rank, statistic").DataType("string").Required(false).DefaultValue("rank")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "workspace"})) - - ws.Route(ws.GET("/workspaces").To(u.monitorAllWorkspaces). - Doc("monitor workspaces level metrics"). - Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("workspace_memory_utilisation")). - Param(ws.QueryParameter("workspaces_filter", "workspaces re2 expression filter").DataType("string").Required(false).DefaultValue(".*")). - Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)). 
- Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)). - Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")). - Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")). - Param(ws.QueryParameter("type", "rank, statistic").DataType("string").Required(false).DefaultValue("rank")). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "workspace"})) - - ws.Route(ws.GET("/components").To(u.monitorComponentStatus). - Doc("monitor k8s components status"). - Metadata(restfulspec.KeyOpenAPITags, []string{"monitoring", "components"})) - -} - -func (u Monitor) monitorPod(request *restful.Request, response *restful.Response) { - requestParams := client.ParseMonitoringRequestParams(request) - podName := requestParams.PodName - metricName := requestParams.MetricsName - if podName != "" { - // single pod single metric - queryType, params, nullRule := metrics.AssemblePodMetricRequestInfo(requestParams, metricName) - var res *metrics.FormatedMetric - if !nullRule { - res = metrics.GetMetric(queryType, params, metricName) - } - response.WriteAsJson(res) - - } else { - // multiple - rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelPod) - // sorting - sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelPodName) - // paging - pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) - - response.WriteAsJson(pagedMetrics) - } -} - -func (u Monitor) monitorContainer(request *restful.Request, response *restful.Response) { - requestParams := client.ParseMonitoringRequestParams(request) - metricName := requestParams.MetricsName - if requestParams.MetricsFilter != "" { - rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelContainer) - // 
sorting - sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelContainerName) - // paging - pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) - - response.WriteAsJson(pagedMetrics) - - } else { - res := metrics.MonitorContainer(requestParams, metricName) - response.WriteAsJson(res) - } - -} - -func (u Monitor) monitorWorkload(request *restful.Request, response *restful.Response) { - requestParams := client.ParseMonitoringRequestParams(request) - - rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelWorkload) - - var sortedMetrics *metrics.FormatedLevelMetric - var maxMetricCount int - - wlKind := requestParams.WorkloadKind - - // sorting - if wlKind == "" { - - sortedMetrics, maxMetricCount = metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelWorkload) - } else { - - sortedMetrics, maxMetricCount = metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelPodName) - } - - // paging - pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) - - response.WriteAsJson(pagedMetrics) - -} - -func (u Monitor) monitorAllWorkspaces(request *restful.Request, response *restful.Response) { - - requestParams := client.ParseMonitoringRequestParams(request) - - tp := requestParams.Tp - if tp == "_statistics" { - // merge multiple metric: all-devops, all-roles, all-projects...this api is designed for admin - res := metrics.MonitorAllWorkspacesStatistics() - - response.WriteAsJson(res) - - } else if tp == "rank" { - rawMetrics := metrics.MonitorAllWorkspaces(requestParams) - // sorting - sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelWorkspace) - // paging - pagedMetrics := metrics.Page(requestParams.PageNum, 
requestParams.LimitNum, sortedMetrics, maxMetricCount) - - response.WriteAsJson(pagedMetrics) - } else { - res := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelWorkspace) - response.WriteAsJson(res) - } -} - -func (u Monitor) monitorOneWorkspace(request *restful.Request, response *restful.Response) { - requestParams := client.ParseMonitoringRequestParams(request) - - tp := requestParams.Tp - if tp == "rank" { - // multiple - rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelWorkspace) - // sorting - sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelNamespace) - // paging - pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) - - response.WriteAsJson(pagedMetrics) - - } else if tp == "_statistics" { - wsName := requestParams.WsName - - // merge multiple metric: devops, roles, projects... - res := metrics.MonitorOneWorkspaceStatistics(wsName) - response.WriteAsJson(res) - } else { - res := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelWorkspace) - response.WriteAsJson(res) - } -} - -func (u Monitor) monitorNamespace(request *restful.Request, response *restful.Response) { - requestParams := client.ParseMonitoringRequestParams(request) - metricName := requestParams.MetricsName - nsName := requestParams.NsName - if nsName != "" { - // single - queryType, params := metrics.AssembleNamespaceMetricRequestInfo(requestParams, metricName) - res := metrics.GetMetric(queryType, params, metricName) - response.WriteAsJson(res) - } else { - // multiple - rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelNamespace) - // sorting - sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelNamespace) - // paging - pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, 
maxMetricCount) - - response.WriteAsJson(pagedMetrics) - } -} - -func (u Monitor) monitorCluster(request *restful.Request, response *restful.Response) { - requestParams := client.ParseMonitoringRequestParams(request) - - metricName := requestParams.MetricsName - if metricName != "" { - // single - queryType, params := metrics.AssembleClusterMetricRequestInfo(requestParams, metricName) - res := metrics.GetMetric(queryType, params, metricName) - - response.WriteAsJson(res) - } else { - // multiple - res := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelCluster) - response.WriteAsJson(res) - } -} - -func (u Monitor) monitorNode(request *restful.Request, response *restful.Response) { - requestParams := client.ParseMonitoringRequestParams(request) - - metricName := requestParams.MetricsName - if metricName != "" { - // single - queryType, params := metrics.AssembleNodeMetricRequestInfo(requestParams, metricName) - res := metrics.GetMetric(queryType, params, metricName) - nodeAddress := metrics.GetNodeAddressInfo() - metrics.AddNodeAddressMetric(res, nodeAddress) - response.WriteAsJson(res) - } else { - // multiple - rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelNode) - nodeAddress := metrics.GetNodeAddressInfo() - - for i := 0; i < len(rawMetrics.Results); i++ { - metrics.AddNodeAddressMetric(&rawMetrics.Results[i], nodeAddress) - } - - // sorting - sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelNode) - // paging - pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount) - - response.WriteAsJson(pagedMetrics) - } -} - -// k8s component(controller, scheduler, etcd) status -func (u Monitor) monitorComponentStatus(request *restful.Request, response *restful.Response) { - requestParams := client.ParseMonitoringRequestParams(request) - - status := metrics.MonitorComponentStatus(requestParams) - 
response.WriteAsJson(status) -} - -type Monitor struct { -} diff --git a/pkg/options/options.go b/pkg/options/options.go new file mode 100644 index 000000000..595a584ec --- /dev/null +++ b/pkg/options/options.go @@ -0,0 +1,63 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package options + +import ( + "github.com/spf13/pflag" +) + +var SharedOptions = NewServerRunOptions() + +type ServerRunOptions struct { + // server bind address + BindAddress string + + // insecure port number + InsecurePort int + + // secure port number + SecurePort int + + // tls cert file + TlsCertFile string + + // tls private key file + TlsPrivateKey string + + CommandLine *pflag.FlagSet +} + +func NewServerRunOptions() *ServerRunOptions { + // create default server run options + s := ServerRunOptions{ + BindAddress: "0.0.0.0", + InsecurePort: 9090, + SecurePort: 0, + TlsCertFile: "", + TlsPrivateKey: "", + CommandLine: &pflag.FlagSet{}, + } + + s.CommandLine.StringVar(&s.BindAddress, "bind-address", "0.0.0.0", "server bind address") + s.CommandLine.IntVar(&s.InsecurePort, "insecure-port", 9090, "insecure port number") + s.CommandLine.IntVar(&s.SecurePort, "secure-port", 0, "secure port number") + s.CommandLine.StringVar(&s.TlsCertFile, "tls-cert-file", "", "tls cert file") + s.CommandLine.StringVar(&s.TlsPrivateKey, "tls-private-key", "", "tls private key") + + return &s +} diff --git a/pkg/params/pagging.go b/pkg/params/pagging.go deleted file mode 
100644 index a94ddd887..000000000 --- a/pkg/params/pagging.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -*/ -package params - -import ( - "regexp" - "strconv" - - "golang.org/x/tools/container/intsets" -) - -const ( - Paging = "paging" - OrderBy = "orderBy" - Conditions = "conditions" - Reserve = "reserve" -) - -func ParsePaging(paging string) (limit, offset int) { - limit = intsets.MaxInt - offset = 0 - if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(paging); len(groups) == 3 { - limit, _ = strconv.Atoi(groups[1]) - page, _ := strconv.Atoi(groups[2]) - if page < 0 { - page = 1 - } - offset = (page - 1) * limit - } - return -} - -func ParseReserve(reserve string) bool { - b, err := strconv.ParseBool(reserve) - if err != nil { - return false - } - return b -} diff --git a/pkg/params/params.go b/pkg/params/params.go new file mode 100644 index 000000000..80854989f --- /dev/null +++ b/pkg/params/params.go @@ -0,0 +1,89 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package params + +import ( + "fmt" + "github.com/emicklei/go-restful" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/container/intsets" +) + +const ( + PagingParam = "paging" + OrderByParam = "orderBy" + ConditionsParam = "conditions" + ReverseParam = "reverse" +) + +func ParsePaging(req *restful.Request) (limit, offset int) { + paging := req.QueryParameter(PagingParam) + limit = intsets.MaxInt + offset = 0 + if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(paging); len(groups) == 3 { + limit, _ = strconv.Atoi(groups[1]) + page, _ := strconv.Atoi(groups[2]) + if page < 0 { + page = 1 + } + offset = (page - 1) * limit + } + return +} + +func ParseConditions(req *restful.Request) (*Conditions, error) { + conditionsStr := req.QueryParameter(ConditionsParam) + conditions := &Conditions{Match: make(map[string]string, 0), Fuzzy: make(map[string]string, 0)} + + if conditionsStr == "" { + return conditions, nil + } + + for _, item := range strings.Split(conditionsStr, ",") { + if strings.Count(item, "=") > 1 || strings.Count(item, "~") > 1 { + return nil, fmt.Errorf("invalid conditions") + } + if groups := regexp.MustCompile(`(\S+)([=~])(\S+)`).FindStringSubmatch(item); len(groups) == 4 { + if groups[2] == "=" { + conditions.Match[groups[1]] = groups[3] + } else { + conditions.Fuzzy[groups[1]] = groups[3] + } + } else { + return nil, fmt.Errorf("invalid conditions") + } + } + return conditions, nil +} + +func ParseReverse(req *restful.Request) bool { + reverse := req.QueryParameter(ReverseParam) + b, err := 
strconv.ParseBool(reverse) + if err != nil { + return false + } + return b +} + +type Conditions struct { + Match map[string]string + Fuzzy map[string]string +} diff --git a/pkg/utils/iputils.go b/pkg/utils/iputils.go new file mode 100644 index 000000000..207c761ee --- /dev/null +++ b/pkg/utils/iputils.go @@ -0,0 +1,48 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package utils + +import ( + "net" + "net/http" +) + +const ( + XForwardedFor = "X-Forwarded-For" + XRealIP = "X-Real-IP" + XClientIP = "x-client-ip" +) + +func RemoteIp(req *http.Request) string { + remoteAddr := req.RemoteAddr + if ip := req.Header.Get(XClientIP); ip != "" { + remoteAddr = ip + } else if ip := req.Header.Get(XRealIP); ip != "" { + remoteAddr = ip + } else if ip = req.Header.Get(XForwardedFor); ip != "" { + remoteAddr = ip + } else { + remoteAddr, _, _ = net.SplitHostPort(remoteAddr) + } + + if remoteAddr == "::1" { + remoteAddr = "127.0.0.1" + } + + return remoteAddr +} diff --git a/pkg/utils/jwt/jwt.go b/pkg/utils/jwt/jwt.go new file mode 100644 index 000000000..be7bc9820 --- /dev/null +++ b/pkg/utils/jwt/jwt.go @@ -0,0 +1,59 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package jwt + +import ( + "fmt" + "os" + + "github.com/dgrijalva/jwt-go" +) + +const secretEnv = "JWT_SECRET" + +var Secret []byte + +func init() { + if env := os.Getenv(secretEnv); env != "" { + Secret = []byte(env) + } else { + fmt.Printf("Environment variable %s not set\n", secretEnv) + } +} +func provideKey(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); ok { + return Secret, nil + } else { + return nil, fmt.Errorf("expect token signed with HMAC but got %v", token.Header["alg"]) + } +} + +func ValidateToken(uToken string) (*jwt.Token, error) { + + if len(uToken) == 0 { + return nil, fmt.Errorf("token length is zero") + } + + token, err := jwt.Parse(uToken, provideKey) + + if err != nil { + return nil, err + } + + return token, nil +} diff --git a/pkg/models/hpa/hpa.go b/pkg/utils/sliceutils.go similarity index 62% rename from pkg/models/hpa/hpa.go rename to pkg/utils/sliceutils.go index 91122954c..e93a81440 100644 --- a/pkg/models/hpa/hpa.go +++ b/pkg/utils/sliceutils.go @@ -15,15 +15,23 @@ limitations under the License. 
*/ -package hpa +package utils -import ( - "k8s.io/api/autoscaling/v1" - metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "kubesphere.io/kubesphere/pkg/client" -) - -func GetHPA(namespace, hpa string) (*v1.HorizontalPodAutoscaler, error) { - return client.K8sClient().AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(hpa, metaV1.GetOptions{}) +func RemoveString(slice []string, remove func(item string) bool) []string { + for i := 0; i < len(slice); i++ { + if remove(slice[i]) { + slice = append(slice[:i], slice[i+1:]...) + i-- + } + } + return slice +} + +func HasString(slice []string, str string) bool { + for _, s := range slice { + if s == str { + return true + } + } + return false } diff --git a/pkg/version/version.go b/pkg/version/version.go index 7f98c3fbb..27a2d5882 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -1,40 +1,19 @@ /* -Copyright 2018 The KubeSphere Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Copyright 2019 The KubeSphere Authors. - http://www.apache.org/licenses/LICENSE-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. */ package version - -import ( - "fmt" - "os" - - "github.com/spf13/pflag" - - "kubesphere.io/kubesphere/pkg/constants" -) - -var ( - versionFlag = pflag.Bool("version", false, "print the version of kubesphere") -) - -// PrintAndExitIfRequested will check if the -version flag was passed -// and, if so, print the version and exit. - -func PrintAndExitIfRequested() { - if *versionFlag { - fmt.Printf("Kubesphere %s\n", constants.APIVersion) - os.Exit(0) - } -} diff --git a/tools/cmd/doc-gen/main.go b/tools/cmd/doc-gen/main.go index 4ba9e3f78..65d5057c8 100644 --- a/tools/cmd/doc-gen/main.go +++ b/tools/cmd/doc-gen/main.go @@ -20,35 +20,74 @@ package main import ( "bytes" "encoding/json" + "flag" "fmt" "github.com/emicklei/go-restful" "github.com/emicklei/go-restful-openapi" + "github.com/go-openapi/spec" + "io/ioutil" "kubesphere.io/kubesphere/pkg/apiserver/runtime" "log" - + // Install apis _ "kubesphere.io/kubesphere/pkg/apis/metrics/install" _ "kubesphere.io/kubesphere/pkg/apis/operations/install" _ "kubesphere.io/kubesphere/pkg/apis/resources/install" ) +var output string + +func init() { + flag.StringVar(&output, "output", "./api.json", "--output=./api.json") +} + func main() { + flag.Parse() generateSwaggerJson() } func generateSwaggerJson() { - config := restfulspec.Config{ - WebServices: restful.RegisteredWebServices(), - } - - swagger := restfulspec.BuildSwagger(config) - container := runtime.Container apiTree(container) + config := restfulspec.Config{ + WebServices: container.RegisteredWebServices(), + PostBuildSwaggerObjectHandler: enrichSwaggerObject} + + swagger := restfulspec.BuildSwagger(config) + data, _ := json.Marshal(swagger) - log.Println(string(data)) + err := ioutil.WriteFile(output, data, 420) + if err != nil { + log.Fatal(err) + } + log.Printf("successfully written to %s", output) +} + +func enrichSwaggerObject(swo *spec.Swagger) { + swo.Info = 
&spec.Info{ + InfoProps: spec.InfoProps{ + Title: "KubeSphere", + Description: "KubeSphere OpenAPI", + Contact: &spec.ContactInfo{ + Name: "kubesphere", + Email: "kubesphere@yunify.com", + URL: "kubesphere.io", + }, + License: &spec.License{ + Name: "Apache", + URL: "http://www.apache.org/licenses/", + }, + Version: "2.0.0", + }, + } + + // setup security definitions + swo.SecurityDefinitions = map[string]*spec.SecurityScheme{ + "jwt": spec.APIKeyAuth("Authorization", "header"), + } + swo.Security = []map[string][]string{{"jwt": []string{}}} } func apiTree(container *restful.Container) { diff --git a/tools/cmd/resources-gen/main.go b/tools/cmd/resources-gen/main.go deleted file mode 100644 index 861ea2f0e..000000000 --- a/tools/cmd/resources-gen/main.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- -*/ -package main - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/format" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" - "text/template" -) - -func main() { - log.SetFlags(0) - log.SetPrefix("resource-gen: ") - flag.Parse() - - fileSet := token.NewFileSet() - target := os.Getenv("GOFILE") - log.Println("start parse", target) - f, err := parser.ParseFile(fileSet, target, nil, 0) - if err != nil { - log.Fatal(err) - } - imports := make(map[string]string, 0) - lister := make(map[string][2]string, 0) - - for _, i := range f.Imports { - if i.Name != nil { - imports[i.Name.Name] = i.Path.Value - } else { - j := strings.LastIndex(i.Path.Value, "/") - if j > 0 { - imports[i.Path.Value[j+1:]] = i.Path.Value - } else { - imports[i.Path.Value] = i.Path.Value - } - } - } - for _, v := range f.Scope.Objects { - - if v.Kind != ast.Var || !strings.HasSuffix(v.Name, "Lister") { - continue - } - - if d, ok := v.Decl.(*ast.ValueSpec); ok { - if t, ok := d.Type.(*ast.SelectorExpr); ok { - if strings.HasSuffix(t.Sel.Name, "Lister") { - lister[t.Sel.Name] = [2]string{t.X.(*ast.Ident).Name, v.Name} - } - } - } - } - - src := genString(imports, lister) - - baseName := fmt.Sprintf("getter.go") - outputName := filepath.Join(".", strings.ToLower(baseName)) - err = ioutil.WriteFile(outputName, src, 0644) - - if err != nil { - log.Fatalln(err) - } -} - -func genString(imports map[string]string, lister map[string][2]string) []byte { - const strTmp = ` - package {{.pkg}} - import ( - {{range $index,$str :=.imports}} - {{$str}} - {{end}} - ) - - {{range $l,$m :=.lister}} - func Get{{$l}}() {{index $m 0}}.{{$l}} { - return {{index $m 1}} - } - {{end}} - ` - pkgName := os.Getenv("GOPACKAGE") - - i := make([]string, 0) - for _, v := range lister { - if imports[v[0]] == "" { - log.Panicln("import xxxxxx") - } else { - str := fmt.Sprintln(v[0], imports[v[0]]) - if !hasString(i, str) { - i = append(i, fmt.Sprintln(v[0], imports[v[0]])) - } - } - } - - 
data := map[string]interface{}{ - "pkg": pkgName, - "imports": i, - "lister": lister, - } - //利用模板库,生成代码文件 - t, err := template.New("").Parse(strTmp) - if err != nil { - log.Fatal(err) - } - buff := bytes.NewBufferString("") - err = t.Execute(buff, data) - if err != nil { - log.Fatal(err) - } - //进行格式化 - src, err := format.Source(buff.Bytes()) - if err != nil { - log.Fatal(err) - } - return src -} - -func hasString(slice []string, str string) bool { - for _, s := range slice { - if s == str { - return true - } - } - return false -} diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go index b37e930d6..4051c1b33 100644 --- a/vendor/github.com/Microsoft/go-winio/ea.go +++ b/vendor/github.com/Microsoft/go-winio/ea.go @@ -1,137 +1,137 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "errors" -) - -type fileFullEaInformation struct { - NextEntryOffset uint32 - Flags uint8 - NameLength uint8 - ValueLength uint16 -} - -var ( - fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) - - errInvalidEaBuffer = errors.New("invalid extended attribute buffer") - errEaNameTooLarge = errors.New("extended attribute name too large") - errEaValueTooLarge = errors.New("extended attribute value too large") -) - -// ExtendedAttribute represents a single Windows EA. 
-type ExtendedAttribute struct { - Name string - Value []byte - Flags uint8 -} - -func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { - var info fileFullEaInformation - err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) - if err != nil { - err = errInvalidEaBuffer - return - } - - nameOffset := fileFullEaInformationSize - nameLen := int(info.NameLength) - valueOffset := nameOffset + int(info.NameLength) + 1 - valueLen := int(info.ValueLength) - nextOffset := int(info.NextEntryOffset) - if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { - err = errInvalidEaBuffer - return - } - - ea.Name = string(b[nameOffset : nameOffset+nameLen]) - ea.Value = b[valueOffset : valueOffset+valueLen] - ea.Flags = info.Flags - if info.NextEntryOffset != 0 { - nb = b[info.NextEntryOffset:] - } - return -} - -// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION -// buffer retrieved from BackupRead, ZwQueryEaFile, etc. -func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { - for len(b) != 0 { - ea, nb, err := parseEa(b) - if err != nil { - return nil, err - } - - eas = append(eas, ea) - b = nb - } - return -} - -func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { - if int(uint8(len(ea.Name))) != len(ea.Name) { - return errEaNameTooLarge - } - if int(uint16(len(ea.Value))) != len(ea.Value) { - return errEaValueTooLarge - } - entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) - withPadding := (entrySize + 3) &^ 3 - nextOffset := uint32(0) - if !last { - nextOffset = withPadding - } - info := fileFullEaInformation{ - NextEntryOffset: nextOffset, - Flags: ea.Flags, - NameLength: uint8(len(ea.Name)), - ValueLength: uint16(len(ea.Value)), - } - - err := binary.Write(buf, binary.LittleEndian, &info) - if err != nil { - return err - } - - _, err = buf.Write([]byte(ea.Name)) - if err != nil { - return err - } - - err = buf.WriteByte(0) 
- if err != nil { - return err - } - - _, err = buf.Write(ea.Value) - if err != nil { - return err - } - - _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) - if err != nil { - return err - } - - return nil -} - -// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION -// buffer for use with BackupWrite, ZwSetEaFile, etc. -func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { - var buf bytes.Buffer - for i := range eas { - last := false - if i == len(eas)-1 { - last = true - } - - err := writeEa(&buf, &eas[i], last) - if err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. 
+type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. +func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) 
+ if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go index b1d60abb8..ada2fbab6 100644 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -20,7 +20,8 @@ const ( // FileBasicInfo contains file access time and file attributes information. type FileBasicInfo struct { CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime - FileAttributes uintptr // includes padding + FileAttributes uint32 + pad uint32 // padding } // GetFileBasicInfo retrieves times and attributes for a file. 
diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index 82cbe7af4..d99eedb64 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -15,7 +15,6 @@ import ( //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe //sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW //sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW -//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc @@ -121,6 +120,11 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { // zero-byte message, ensure that all future Read() calls // also return EOF. f.readEOF = true + } else if err == syscall.ERROR_MORE_DATA { + // ERROR_MORE_DATA indicates that the pipe's read mode is message mode + // and the message still has more bytes. Treat this as a success, since + // this package presents all named pipes as byte streams. 
+ err = nil } return n, err } @@ -134,12 +138,14 @@ func (s pipeAddress) String() string { } // DialPipe connects to a named pipe by path, timing out if the connection -// takes longer than the specified duration. If timeout is nil, then the timeout -// is the default timeout established by the pipe server. +// takes longer than the specified duration. If timeout is nil, then we use +// a default timeout of 5 seconds. (We do not use WaitNamedPipe.) func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { var absTimeout time.Time if timeout != nil { absTimeout = time.Now().Add(*timeout) + } else { + absTimeout = time.Now().Add(time.Second * 2) } var err error var h syscall.Handle @@ -148,22 +154,13 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { if err != cERROR_PIPE_BUSY { break } - now := time.Now() - var ms uint32 - if absTimeout.IsZero() { - ms = cNMPWAIT_USE_DEFAULT_WAIT - } else if now.After(absTimeout) { - ms = cNMPWAIT_NOWAIT - } else { - ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000) - } - err = waitNamedPipe(path, ms) - if err != nil { - if err == cERROR_SEM_TIMEOUT { - return nil, ErrTimeout - } - break + if time.Now().After(absTimeout) { + return nil, ErrTimeout } + + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. 
+ time.Sleep(time.Millisecond * 10) } if err != nil { return nil, &os.PathError{Op: "open", Path: path, Err: err} @@ -175,16 +172,6 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { return nil, err } - var state uint32 - err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0) - if err != nil { - return nil, err - } - - if state&cPIPE_READMODE_MESSAGE != 0 { - return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")} - } - f, err := makeWin32File(h) if err != nil { syscall.Close(h) @@ -354,13 +341,23 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { if err != nil { return nil, err } - // Immediately open and then close a client handle so that the named pipe is - // created but not currently accepting connections. + // Create a client handle and connect it. This results in the pipe + // instance always existing, so that clients see ERROR_PIPE_BUSY + // rather than ERROR_FILE_NOT_FOUND. This ties the first instance + // up so that no other instances can be used. This would have been + // cleaner if the Win32 API matched CreateFile with ConnectNamedPipe + // instead of CreateNamedPipe. (Apparently created named pipes are + // considered to be in listening state regardless of whether any + // active calls to ConnectNamedPipe are outstanding.) h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) if err != nil { syscall.Close(h) return nil, err } + // Close the client handle. The server side of the instance will + // still be busy, leading to ERROR_PIPE_BUSY instead of + // ERROR_NOT_FOUND, as long as we don't close the server handle, + // or disconnect the client with DisconnectNamedPipe. 
syscall.Close(h2) l := &win32PipeListener{ firstHandle: h, diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml index facfc91c6..cf31e6af6 100644 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ b/vendor/github.com/PuerkitoBio/purell/.travis.yml @@ -1,7 +1,12 @@ language: go go: - - 1.4 - - 1.5 - - 1.6 + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - "1.10.x" + - "1.11.x" - tip diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md index 09e8a32cb..07de0c498 100644 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ b/vendor/github.com/PuerkitoBio/purell/README.md @@ -4,7 +4,7 @@ Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. -[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell) +[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) ## Install @@ -12,6 +12,7 @@ Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. ## Changelog +* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). * **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). * **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). * **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). 
diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go index 645e1b76f..6d0fc190a 100644 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ b/vendor/github.com/PuerkitoBio/purell/purell.go @@ -299,7 +299,7 @@ func sortQuery(u *url.URL) { if len(q) > 0 { arKeys := make([]string, len(q)) i := 0 - for k, _ := range q { + for k := range q { arKeys[i] = k i++ } diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore index 66be63a00..6b7d7d1e8 100644 --- a/vendor/github.com/Sirupsen/logrus/.gitignore +++ b/vendor/github.com/Sirupsen/logrus/.gitignore @@ -1 +1,2 @@ logrus +vendor diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml index a23296a53..a8f154515 100644 --- a/vendor/github.com/Sirupsen/logrus/.travis.yml +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -1,15 +1,52 @@ language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - tip +go_import_path: github.com/sirupsen/logrus env: - GOMAXPROCS=4 GORACE=halt_on_error=1 -install: - - go get github.com/stretchr/testify/assert - - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2 - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows -script: - - go test -race -v ./... +matrix: + include: + - go: 1.10.x + install: + - go get github.com/stretchr/testify/assert + - go get golang.org/x/crypto/ssh/terminal + - go get golang.org/x/sys/unix + - go get golang.org/x/sys/windows + script: + - go test -race -v ./... + - go: 1.11.x + env: GO111MODULE=on + install: + - go mod download + script: + - go test -race -v ./... + - go: 1.11.x + env: GO111MODULE=off + install: + - go get github.com/stretchr/testify/assert + - go get golang.org/x/crypto/ssh/terminal + - go get golang.org/x/sys/unix + - go get golang.org/x/sys/windows + script: + - go test -race -v ./... 
+ - go: 1.10.x + install: + - go get github.com/stretchr/testify/assert + - go get golang.org/x/crypto/ssh/terminal + - go get golang.org/x/sys/unix + - go get golang.org/x/sys/windows + script: + - go test -race -v -tags appengine ./... + - go: 1.11.x + env: GO111MODULE=on + install: + - go mod download + script: + - go test -race -v -tags appengine ./... + - go: 1.11.x + env: GO111MODULE=off + install: + - go get github.com/stretchr/testify/assert + - go get golang.org/x/crypto/ssh/terminal + - go get golang.org/x/sys/unix + - go get golang.org/x/sys/windows + script: + - go test -race -v -tags appengine ./... diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md index 1bd1deb29..cb85d9f9f 100644 --- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,45 @@ +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release. 
+ * fix the build break on Solaris + * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized + +# 1.1.0 +This new release introduces: + * several fixes: + * a fix for a race condition on entry formatting + * proper cleanup of previously used entries before putting them back in the pool + * the extra new line at the end of message in text formatter has been removed + * a new global public API to check if a level is activated: IsLevelEnabled + * the following methods have been added to the Logger object + * IsLevelEnabled + * SetFormatter + * SetOutput + * ReplaceHooks + * introduction of go module + * an indent configuration for the json formatter + * output colour support for windows + * the field sort function is now configurable for text formatter + * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater + +# 1.0.6 + +This new release introduces: + * a new api WithTime which allows to easily force the time of the log entry + which is mostly useful for logger wrapper + * a fix reverting the immutability of the entry given as parameter to the hooks + a new configuration field of the json formatter in order to put all the fields + in a nested dictionnary + * a new SetOutput method in the Logger + * a new configuration of the textformatter to configure the name of the default keys + * a new configuration of the text formatter to disable the level truncation + # 1.0.5 * Fix hooks race (#707) diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md index f77819b16..398731055 100644 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -56,8 +56,39 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" 
animal=orca size=9009 time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 ``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + #### Case-sensitivity @@ -241,66 +272,15 @@ func init() { ``` Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). -| Hook | Description | -| ----- | ----------- | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. 
| -| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | -| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [Application Insights](https://github.com/jjcollinge/logrus-appinsights) | Hook for logging to [Application Insights](https://azure.microsoft.com/en-us/services/application-insights/) -| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage| -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | -| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| -| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. 
| -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka | -| [Kafka REST Proxy](https://github.com/Nordstrom/logrus-kafka-rest-proxy) | Hook for logging to [Kafka REST Proxy](https://docs.confluent.io/current/kafka-rest/docs) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) | -| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | -| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | -| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | -| 
[Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | -| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics | -| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | -| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. 
| -| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) | -| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | -| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | -| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | +A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) + #### Level logging -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. ```go +log.Trace("Something very low level.") log.Debug("Useful debugging information.") log.Info("Something noteworthy happened!") log.Warn("You should probably take a look at this.") @@ -372,6 +352,8 @@ The built-in logging formatters are: field to `true`. To force no colored output even if there is a TTY set the `DisableColors` field to `true`. For Windows, see [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * When colors are enabled, levels are truncated to 4 characters by default. To disable + truncation set the `DisableLevelTruncation` field to `true`. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). 
@@ -379,6 +361,7 @@ The built-in logging formatters are: Third party logging formatters: * [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. @@ -495,7 +478,7 @@ logrus.RegisterExitHandler(handler) #### Thread safety -By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. +By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. 
Situation when locking is not needed includes: diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go index 778f4c9f0..df6d188de 100644 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -4,11 +4,30 @@ import ( "bytes" "fmt" "os" + "reflect" + "runtime" + "strings" "sync" "time" ) -var bufferPool *sync.Pool +var ( + bufferPool *sync.Pool + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) func init() { bufferPool = &sync.Pool{ @@ -16,15 +35,18 @@ func init() { return new(bytes.Buffer) }, } + + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 } // Defines the key when adding errors using WithError. var ErrorKey = "error" // An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. type Entry struct { Logger *Logger @@ -34,22 +56,28 @@ type Entry struct { // Time at which the log entry was created Time time.Time - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic // This field will be set on entry firing and the value will be equal to the one in Logger struct field. 
Level Level - // Message passed to Debug, Info, Warn, Error, Fatal or Panic + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic Message string - // When formatter is called in entry.log(), an Buffer may be set to entry + // When formatter is called in entry.log(), a Buffer may be set to entry Buffer *bytes.Buffer + + // err may contain a field formatting error + err string } func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), } } @@ -80,19 +108,106 @@ func (entry *Entry) WithFields(fields Fields) *Entry { for k, v := range entry.Data { data[k] = v } + fieldErr := entry.err for k, v := range fields { - data[k] = v + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch t.Kind() { + case reflect.Func: + isErrField = true + case reflect.Ptr: + isErrField = t.Elem().Kind() == reflect.Func + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } } - return &Entry{Logger: entry.Logger, Data: data} + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr} +} + +// Overrides the time of the Entry. +func (entry *Entry) WithTime(t time.Time) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... 
+func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name()) + + // now that we have the cache, we can skip a minimum count of known-logrus functions + // XXX this is dubious, the number of frames may vary store an entry in a logger interface + minimumCallerDepth = knownLogrusFrames + }) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil } // This function is not declared with a pointer value because otherwise // race conditions will occur when using multiple goroutines func (entry Entry) log(level Level, msg string) { var buffer *bytes.Buffer - entry.Time = time.Now() + + // Default to now, but allow users to override if they want. + // + // We don't have to worry about polluting future calls to Entry#log() + // with this assignment because this function is declared with a + // non-pointer receiver. 
+ if entry.Time.IsZero() { + entry.Time = time.Now() + } + entry.Level = level entry.Message = msg + if entry.Logger.ReportCaller { + entry.Caller = getCaller() + } entry.fireHooks() @@ -113,21 +228,19 @@ func (entry Entry) log(level Level, msg string) { } } -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) fireHooks() { +func (entry *Entry) fireHooks() { entry.Logger.mu.Lock() defer entry.Logger.mu.Unlock() - err := entry.Logger.Hooks.Fire(entry.Level, &entry) + err := entry.Logger.Hooks.Fire(entry.Level, entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) } } func (entry *Entry) write() { - serialized, err := entry.Logger.Formatter.Format(entry) entry.Logger.mu.Lock() defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) } else { @@ -138,26 +251,30 @@ func (entry *Entry) write() { } } -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) } } +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + func (entry *Entry) Print(args ...interface{}) { entry.Info(args...) } func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } + entry.Log(InfoLevel, args...) } func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } + entry.Log(WarnLevel, args...) 
} func (entry *Entry) Warning(args ...interface{}) { @@ -165,37 +282,35 @@ func (entry *Entry) Warning(args ...interface{}) { } func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } + entry.Log(ErrorLevel, args...) } func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - Exit(1) + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) } func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } + entry.Log(PanicLevel, args...) panic(fmt.Sprint(args...)) } // Entry Printf family functions +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + entry.Log(level, fmt.Sprintf(format, args...)) +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } + entry.Logf(DebugLevel, format, args...) } func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } + entry.Logf(InfoLevel, format, args...) } func (entry *Entry) Printf(format string, args ...interface{}) { @@ -203,9 +318,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) { } func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } + entry.Logf(WarnLevel, format, args...) 
} func (entry *Entry) Warningf(format string, args ...interface{}) { @@ -213,36 +326,36 @@ func (entry *Entry) Warningf(format string, args ...interface{}) { } func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } + entry.Logf(ErrorLevel, format, args...) } func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - Exit(1) + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } + entry.Logf(PanicLevel, format, args...) } // Entry Println family functions -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) } } +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } + entry.Logln(InfoLevel, args...) } func (entry *Entry) Println(args ...interface{}) { @@ -250,9 +363,7 @@ func (entry *Entry) Println(args ...interface{}) { } func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } + entry.Logln(WarnLevel, args...) 
} func (entry *Entry) Warningln(args ...interface{}) { @@ -260,22 +371,16 @@ func (entry *Entry) Warningln(args ...interface{}) { } func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } + entry.Logln(ErrorLevel, args...) } func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - Exit(1) + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } + entry.Logln(PanicLevel, args...) } // Sprintlnn => Sprint no newline. This is to get the behavior of how diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go index 013183eda..7342613c3 100644 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -2,6 +2,7 @@ package logrus import ( "io" + "time" ) var ( @@ -15,37 +16,38 @@ func StandardLogger() *Logger { // SetOutput sets the standard logger output. func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out + std.SetOutput(out) } // SetFormatter sets the standard logger formatter. func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) } // SetLevel sets the standard logger level. func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() std.SetLevel(level) } // GetLevel returns the standard logger level. 
func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.level() + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) } // AddHook adds a hook to the standard logger hooks. func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) + std.AddHook(hook) } // WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. @@ -72,6 +74,20 @@ func WithFields(fields Fields) *Entry { return std.WithFields(fields) } +// WithTime creats an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + // Debug logs a message at level Debug on the standard logger. func Debug(args ...interface{}) { std.Debug(args...) @@ -107,11 +123,16 @@ func Panic(args ...interface{}) { std.Panic(args...) } -// Fatal logs a message at level Fatal on the standard logger. +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatal(args ...interface{}) { std.Fatal(args...) } +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + // Debugf logs a message at level Debug on the standard logger. func Debugf(format string, args ...interface{}) { std.Debugf(format, args...) @@ -147,11 +168,16 @@ func Panicf(format string, args ...interface{}) { std.Panicf(format, args...) } -// Fatalf logs a message at level Fatal on the standard logger. 
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) } +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + // Debugln logs a message at level Debug on the standard logger. func Debugln(args ...interface{}) { std.Debugln(args...) @@ -187,7 +213,7 @@ func Panicln(args ...interface{}) { std.Panicln(args...) } -// Fatalln logs a message at level Fatal on the standard logger. +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalln(args ...interface{}) { std.Fatalln(args...) } diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go index b183ff5b1..408883773 100644 --- a/vendor/github.com/Sirupsen/logrus/formatter.go +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -2,7 +2,16 @@ package logrus import "time" -const defaultTimestampFormat = time.RFC3339 +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. It exposes all the fields, including the default ones: @@ -18,7 +27,7 @@ type Formatter interface { Format(*Entry) ([]byte, error) } -// This is to not silently overwrite `time`, `msg` and `level` fields when +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when // dumping it. If this code wasn't there doing: // // logrus.WithField("level", 1).Info("hello") @@ -30,16 +39,40 @@ type Formatter interface { // // It's not exported because it's still using Data in an opinionated way. 
It's to // avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields) { - if t, ok := data["time"]; ok { - data["fields.time"] = t +func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) } - if m, ok := data["msg"]; ok { - data["fields.msg"] = m + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) } - if l, ok := data["level"]; ok { - data["fields.level"] = l + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. + if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } } } diff --git a/vendor/github.com/Sirupsen/logrus/go.mod b/vendor/github.com/Sirupsen/logrus/go.mod new file mode 100644 index 000000000..94574cc63 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/go.mod @@ -0,0 +1,11 @@ +module github.com/sirupsen/logrus + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.1 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.1.1 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 + golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 +) diff --git a/vendor/github.com/Sirupsen/logrus/go.sum b/vendor/github.com/Sirupsen/logrus/go.sum new file mode 100644 index 
000000000..133d34ae1 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/go.sum @@ -0,0 +1,15 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go index fb01c1b10..260575359 100644 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -1,6 +1,7 @@ package logrus import ( + "bytes" "encoding/json" "fmt" ) @@ -10,13 +11,6 @@ type fieldKey string // 
FieldMap allows customization of the key names for default fields. type FieldMap map[fieldKey]string -// Default key names for the default fields -const ( - FieldKeyMsg = "msg" - FieldKeyLevel = "level" - FieldKeyTime = "time" -) - func (f FieldMap) resolve(key fieldKey) string { if k, ok := f[key]; ok { return k @@ -33,21 +27,28 @@ type JSONFormatter struct { // DisableTimestamp allows disabling automatic timestamps in output DisableTimestamp bool + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + // FieldMap allows users to customize the names of keys for default fields. // As an example: // formatter := &JSONFormatter{ // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", + // FieldKeyTime: "@timestamp", // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", // }, // } FieldMap FieldMap + + // PrettyPrint will indent all json logs + PrettyPrint bool } // Format renders a single log entry func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) + data := make(Fields, len(entry.Data)+4) for k, v := range entry.Data { switch v := v.(type) { case error: @@ -58,22 +59,47 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data[k] = v } } - prefixFieldClashes(data) + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) timestampFormat := f.TimestampFormat if timestampFormat == "" { timestampFormat = defaultTimestampFormat } + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } if !f.DisableTimestamp { data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) } data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + 
data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function + data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } - serialized, err := json.Marshal(data) - if err != nil { + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) } - return append(serialized, '\n'), nil + + return b.Bytes(), nil } diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go index fdaf8a653..9bf64e22a 100644 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -5,12 +5,13 @@ import ( "os" "sync" "sync/atomic" + "time" ) type Logger struct { // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventorous, such as logging to Kafka. + // something more adventurous, such as logging to Kafka. Out io.Writer // Hooks for the logger instance. These allow firing events based on logging // levels and log entries. For example, to send errors to an error tracking @@ -23,6 +24,10 @@ type Logger struct { // own that implements the `Formatter` interface, see the `README` or included // formatters for examples. Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + // The logging level the logger should log at. This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be // logged. 
@@ -31,8 +36,12 @@ type Logger struct { mu MutexWrap // Reusable empty entry entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc } +type exitFunc func(int) + type MutexWrap struct { lock sync.Mutex disabled bool @@ -68,10 +77,12 @@ func (mw *MutexWrap) Disable() { // It's recommended to make this a global instance called `log`. func New() *Logger { return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, } } @@ -84,11 +95,12 @@ func (logger *Logger) newEntry() *Entry { } func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} logger.entryPool.Put(entry) } // Adds a field to the log entry, note that it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. // If you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { entry := logger.newEntry() @@ -112,20 +124,31 @@ func (logger *Logger) WithError(err error) *Entry { return entry.WithError(err) } -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.level() >= DebugLevel { +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Debugf(format, args...) + entry.Logf(level, format, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) 
+} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Infof(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(InfoLevel, format, args...) } func (logger *Logger) Printf(format string, args ...interface{}) { @@ -135,60 +158,44 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(WarnLevel, format, args...) } func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnf(format, args...) - logger.releaseEntry(entry) - } + logger.Warnf(format, args...) } func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Errorf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(ErrorLevel, format, args...) } func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatalf(format, args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) } func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.level() >= PanicLevel { + logger.Logf(PanicLevel, format, args...) +} + +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Panicf(format, args...) + entry.Log(level, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) 
+} + func (logger *Logger) Debug(args ...interface{}) { - if logger.level() >= DebugLevel { - entry := logger.newEntry() - entry.Debug(args...) - logger.releaseEntry(entry) - } + logger.Log(DebugLevel, args...) } func (logger *Logger) Info(args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Info(args...) - logger.releaseEntry(entry) - } + logger.Log(InfoLevel, args...) } func (logger *Logger) Print(args ...interface{}) { @@ -198,60 +205,44 @@ func (logger *Logger) Print(args ...interface{}) { } func (logger *Logger) Warn(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warn(args...) - logger.releaseEntry(entry) - } + logger.Log(WarnLevel, args...) } func (logger *Logger) Warning(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warn(args...) - logger.releaseEntry(entry) - } + logger.Warn(args...) } func (logger *Logger) Error(args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Error(args...) - logger.releaseEntry(entry) - } + logger.Log(ErrorLevel, args...) } func (logger *Logger) Fatal(args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatal(args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Log(FatalLevel, args...) + logger.Exit(1) } func (logger *Logger) Panic(args ...interface{}) { - if logger.level() >= PanicLevel { + logger.Log(PanicLevel, args...) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Panic(args...) + entry.Logln(level, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Traceln(args ...interface{}) { + logger.Logln(TraceLevel, args...) +} + func (logger *Logger) Debugln(args ...interface{}) { - if logger.level() >= DebugLevel { - entry := logger.newEntry() - entry.Debugln(args...) 
- logger.releaseEntry(entry) - } + logger.Logln(DebugLevel, args...) } func (logger *Logger) Infoln(args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Infoln(args...) - logger.releaseEntry(entry) - } + logger.Logln(InfoLevel, args...) } func (logger *Logger) Println(args ...interface{}) { @@ -261,44 +252,32 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnln(args...) - logger.releaseEntry(entry) - } + logger.Logln(WarnLevel, args...) } func (logger *Logger) Warningln(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnln(args...) - logger.releaseEntry(entry) - } + logger.Warn(args...) } func (logger *Logger) Errorln(args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Errorln(args...) - logger.releaseEntry(entry) - } + logger.Logln(ErrorLevel, args...) } func (logger *Logger) Fatalln(args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatalln(args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Logln(FatalLevel, args...) + logger.Exit(1) } func (logger *Logger) Panicln(args ...interface{}) { - if logger.level() >= PanicLevel { - entry := logger.newEntry() - entry.Panicln(args...) - logger.releaseEntry(entry) + logger.Logln(PanicLevel, args...) +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit } + logger.ExitFunc(code) } //When file is opened with appending mode, it's safe to @@ -312,12 +291,53 @@ func (logger *Logger) level() Level { return Level(atomic.LoadUint32((*uint32)(&logger.Level))) } +// SetLevel sets the logger level. func (logger *Logger) SetLevel(level Level) { atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) } +// GetLevel returns the logger level. 
+func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. func (logger *Logger) AddHook(hook Hook) { logger.mu.Lock() defer logger.mu.Unlock() logger.Hooks.Add(hook) } + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. +func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go index dd3899974..c1ca88990 100644 --- a/vendor/github.com/Sirupsen/logrus/logrus.go +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -14,22 +14,11 @@ type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" } - - return "unknown" } // ParseLevel takes a string level and returns the Logrus log level constant. 
@@ -47,12 +36,47 @@ func ParseLevel(lvl string) (Level, error) { return InfoLevel, nil case "debug": return DebugLevel, nil + case "trace": + return TraceLevel, nil } var l Level return l, fmt.Errorf("not a valid logrus Level: %q", lvl) } +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = Level(l) + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid lorus level %q", level) +} + // A constant exposing all logging levels var AllLevels = []Level{ PanicLevel, @@ -61,6 +85,7 @@ var AllLevels = []Level{ WarnLevel, InfoLevel, DebugLevel, + TraceLevel, } // These are the different logging levels. You can set the logging level to log @@ -69,7 +94,7 @@ const ( // PanicLevel level, highest level of severity. Logs and then calls panic with the // message passed to Debug, Info, ... PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the // logging level is set to Panic. FatalLevel // ErrorLevel level. Logs. Used for errors that should definitely be noted. @@ -82,6 +107,8 @@ const ( InfoLevel // DebugLevel level. Usually only enabled when debugging. Very verbose logging. DebugLevel + // TraceLevel level. Designates finer-grained informational events than the Debug. 
+ TraceLevel ) // Won't compile if StdLogger can't be realized by a log.Logger @@ -140,4 +167,20 @@ type FieldLogger interface { Errorln(args ...interface{}) Fatalln(args ...interface{}) Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool +} + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is +// here for consistancy. Do not use. Use Logger or Entry instead. +type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) } diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 4880d13d2..000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly -// +build !appengine,!gopherjs - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TIOCGETA - -type Termios unix.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_aix.go b/vendor/github.com/Sirupsen/logrus/terminal_check_aix.go new file mode 100644 index 000000000..04fdb7ba3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_aix.go @@ -0,0 +1,9 @@ +// +build !appengine,!js,!windows,aix + +package logrus + +import "io" + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go index 3de08e802..2403de981 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go @@ -1,4 +1,4 @@ -// +build appengine gopherjs +// +build appengine package logrus diff --git 
a/vendor/github.com/Sirupsen/logrus/terminal_check_js.go b/vendor/github.com/Sirupsen/logrus/terminal_check_js.go new file mode 100644 index 000000000..0c209750a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,11 @@ +// +build js + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go index 067047a12..d46556509 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go @@ -1,4 +1,4 @@ -// +build !appengine,!gopherjs +// +build !appengine,!js,!windows,!aix package logrus diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 000000000..3b9d2864c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,20 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode) + return err == nil + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 000000000..3dbd23720 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,8 @@ +// +build !windows + +package logrus + +import "io" + +func initTerminal(w io.Writer) { +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 000000000..b4ef5286c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ 
-0,0 +1,18 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" + + sequences "github.com/konsorten/go-windows-terminal-sequences" +) + +func initTerminal(w io.Writer) { + switch v := w.(type) { + case *os.File: + sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go index 61b21caea..fb21649c9 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -3,6 +3,8 @@ package logrus import ( "bytes" "fmt" + "os" + "runtime" "sort" "strings" "sync" @@ -20,6 +22,7 @@ const ( var ( baseTimestamp time.Time + emptyFieldMap FieldMap ) func init() { @@ -34,6 +37,9 @@ type TextFormatter struct { // Force disabling colors. DisableColors bool + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + // Disable timestamp logging. useful when output is redirected to logging // system that already adds timestamps. DisableTimestamp bool @@ -50,60 +56,135 @@ type TextFormatter struct { // be desired. DisableSorting bool + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. + DisableLevelTruncation bool + // QuoteEmptyFields will wrap empty fields in quotes if true QuoteEmptyFields bool // Whether the logger's out is to a terminal isTerminal bool - sync.Once + // FieldMap allows users to customize the names of keys for default fields. 
+ // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + terminalInitOnce sync.Once } func (f *TextFormatter) init(entry *Entry) { if entry.Logger != nil { f.isTerminal = checkIfTerminal(entry.Logger.Out) + + if f.isTerminal { + initTerminal(entry.Logger.Out) + } } } +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" { + isColored = true + } else if ok && force == "0" { + isColored = false + } else if os.Getenv("CLICOLOR") == "0" { + isColored = false + } + } + + return isColored && !f.DisableColors +} + // Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var b *bytes.Buffer - keys := make([]string, 0, len(entry.Data)) - for k := range entry.Data { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { keys = append(keys, k) } - if !f.DisableSorting { - sort.Strings(keys) + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + fixedKeys = append(fixedKeys, + f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile)) + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) 
+ } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) + } + + var b *bytes.Buffer if entry.Buffer != nil { b = entry.Buffer } else { b = &bytes.Buffer{} } - prefixFieldClashes(entry.Data) - - f.Do(func() { f.init(entry) }) - - isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors + f.terminalInitOnce.Do(func() { f.init(entry) }) timestampFormat := f.TimestampFormat if timestampFormat == "" { timestampFormat = defaultTimestampFormat } - if isColored { - f.printColored(b, entry, keys, timestampFormat) + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) - } - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = entry.Caller.Function + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + default: + value = data[key] + } + f.appendKeyValue(b, key, value) } } @@ -111,10 +192,10 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { return b.Bytes(), nil } -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { +func (f *TextFormatter) printColored(b 
*bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { var levelColor int switch entry.Level { - case DebugLevel: + case DebugLevel, TraceLevel: levelColor = gray case WarnLevel: levelColor = yellow @@ -124,17 +205,31 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin levelColor = blue } - levelText := strings.ToUpper(entry.Level.String())[0:4] + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation { + levelText = levelText[0:4] + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + + if entry.HasCaller() { + caller = fmt.Sprintf("%s:%d %s()", + entry.Caller.File, entry.Caller.Line, entry.Caller.Function) + } if f.DisableTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) } else if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) } for _, k := range keys { - v := entry.Data[k] + v := data[k] fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) f.appendValue(b, v) } diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go index 7bdebedc6..9e1f75135 100644 --- a/vendor/github.com/Sirupsen/logrus/writer.go +++ 
b/vendor/github.com/Sirupsen/logrus/writer.go @@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { var printFunc func(args ...interface{}) switch level { + case TraceLevel: + printFunc = entry.Trace case DebugLevel: printFunc = entry.Debug case InfoLevel: diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index c83641619..bc52e96f2 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -2,7 +2,7 @@ ISC License Copyright (c) 2012-2016 Dave Collins -Permission to use, copy, modify, and distribute this software for any +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 8a4a6589a..792994785 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -16,7 +16,9 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 package spew @@ -34,80 +36,49 @@ const ( ptrSize = unsafe.Sizeof((*byte)(nil)) ) -var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. 
The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) +type flag uintptr - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag ) -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } +// flagKindMask holds the bits that make up the kind +// part of the flags field. 
In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. 
+func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. 
+ t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return } } - - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() - } - return rv + panic("reflect.Value read-only flag has changed semantics") } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 1fe3cf3d5..205c28d68 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -16,7 +16,7 @@ // when the code is running on Google App Engine, compiled by GopherJS, or // "-tags safe" is added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. 
-// +build js appengine safe disableunsafe +// +build js appengine safe disableunsafe !go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 7c519ff47..1be8ce945 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) { w.Write(closeParenBytes) } -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' // prefix to Writer w. func printHexPtr(w io.Writer, p uintptr) { // Null pointer. diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index df1d582a7..f78d89fc1 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -35,16 +35,16 @@ var ( // cCharRE is a regular expression that matches a cgo char. // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) // cUnsignedCharRE is a regular expression that matches a cgo unsigned // char. It is used to detect unsigned character arrays to hexdump // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) // cUint8tCharRE is a regular expression that matches a cgo uint8_t. // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) ) // dumpState contains information about the state of a dump operation. @@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) { // Display dereferenced value. 
d.w.Write(openParenBytes) switch { - case nilFound == true: + case nilFound: d.w.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: d.w.Write(circularBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index c49875bac..b04edb7d7 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) { // Display dereferenced value. switch { - case nilFound == true: + case nilFound: f.fs.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: f.fs.Write(circularShortBytes) default: diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore new file mode 100644 index 000000000..80bed650e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin + + diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml new file mode 100644 index 000000000..1027f56cd --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/.travis.yml @@ -0,0 +1,13 @@ +language: go + +script: + - go vet ./... + - go test -v ./... 
+ +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE new file mode 100644 index 000000000..df83a9c2f --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md new file mode 100644 index 000000000..7fc1f793c --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md @@ -0,0 +1,97 @@ +## Migration Guide from v2 -> v3 + +Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. 
+ +### `Token.Claims` is now an interface type + +The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. + +`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. + +The old example for parsing a token looked like this.. + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is now directly mapped to... + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. + +```go + type MyCustomClaims struct { + User string + *StandardClaims + } + + if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { + claims := token.Claims.(*MyCustomClaims) + fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) + } +``` + +### `ParseFromRequest` has been moved + +To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`. 
+ +`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. + +This simple parsing example: + +```go + if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is directly mapped to: + +```go + if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +There are several concrete `Extractor` types provided for your convenience: + +* `HeaderExtractor` will search a list of headers until one contains content. +* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. +* `MultiExtractor` will try a list of `Extractors` in order until one returns content. +* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. +* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument +* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header + + +### RSA signing methods no longer accept `[]byte` keys + +Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. + +To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. 
If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. + +```go + func keyLookupFunc(*Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + + // Look up key + key, err := lookupPublicKey(token.Header["kid"]) + if err != nil { + return nil, err + } + + // Unpack key from PEM encoded PKCS8 + return jwt.ParseRSAPublicKeyFromPEM(key) + } +``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md new file mode 100644 index 000000000..d358d881b --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/README.md @@ -0,0 +1,100 @@ +# jwt-go + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) + +**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3. + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. 
+ +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. 
+ +## Examples + +See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: + +* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) +* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) +* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. + +Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. 
You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. + +**BREAKING CHANGES:*** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data: + +* The author of the token was in the possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. 
This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. 
This is why transactions that use bearer tokens typically happen over SSL. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md new file mode 100644 index 000000000..637029831 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,118 @@ +## `jwt-go` Version History + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. 
This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. 
+* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. 
+ +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. 
No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go new file mode 100644 index 000000000..f0228f02e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/claims.go @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. 
+ if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud string, cmp string, required bool) bool { + if aud == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go new file mode 100644 index 000000000..a86dc1a3b --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. 
+package jwt diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go new file mode 100644 index 000000000..f97738124 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch 
k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outpus (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 000000000..d19624b72 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 000000000..1c93024aa --- 
/dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... 
constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go new file mode 100644 index 000000000..addbe5d40 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. 
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. +// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go new file mode 100644 index 000000000..291213c46 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, _ := m["aud"].(string) + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. 
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go new file mode 100644 index 000000000..f04d189d0 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be 
an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 000000000..d6901d9ad --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + 
token.Valid = true + return token, nil + } + + return token, vErr +} + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. +func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. 
Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go new file mode 100644 index 000000000..e4caf1ca4 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m 
*SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be an *rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go new file mode 100644 index 000000000..10ee9db8a --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific 
instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) 
Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 000000000..a5ababf95 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err 
error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 000000000..ed1f212b2 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. 
+type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 000000000..d637e0867 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. 
Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS deleted file mode 100644 index 252ff8aa2..000000000 --- a/vendor/github.com/docker/distribution/AUTHORS +++ /dev/null @@ -1,182 +0,0 @@ -a-palchikov -Aaron Lehmann -Aaron Schlesinger -Aaron Vinson -Adam Duke -Adam Enger -Adrian Mouat -Ahmet Alp Balkan -Alex Chan -Alex Elman -Alexey Gladkov -allencloud -amitshukla -Amy Lindburg -Andrew Hsu -Andrew Meredith -Andrew T Nguyen -Andrey Kostov -Andy Goldstein -Anis Elleuch -Anton Tiurin -Antonio Mercado -Antonio Murdaca -Anusha Ragunathan -Arien Holthuizen -Arnaud Porterie -Arthur Baars -Asuka Suzuki -Avi Miller -Ayose Cazorla -BadZen -Ben Bodenmiller -Ben Firshman -bin liu -Brian Bland -burnettk -Carson A -Cezar Sa Espinola -Charles Smith -Chris Dillon -cuiwei13 -cyli -Daisuke Fujita -Daniel Huhn -Darren Shepherd -Dave Trombley -Dave Tucker -David Lawrence -David Verhasselt -David Xia -davidli -Dejan Golja -Derek McGowan -Diogo Mónica -DJ Enriquez -Donald Huang -Doug Davis -Edgar Lee -Eric Yang -Fabio Berchtold -Fabio Huser -farmerworking -Felix Yan -Florentin Raud -Frank Chen -Frederick F. 
Kautz IV -gabriell nascimento -Gleb Schukin -harche -Henri Gomez -Hu Keping -Hua Wang -HuKeping -Ian Babrou -igayoso -Jack Griffin -James Findley -Jason Freidman -Jason Heiss -Jeff Nickoloff -Jess Frazelle -Jessie Frazelle -jhaohai -Jianqing Wang -Jihoon Chung -Joao Fernandes -John Mulhausen -John Starks -Jon Johnson -Jon Poler -Jonathan Boulle -Jordan Liggitt -Josh Chorlton -Josh Hawn -Julien Fernandez -Ke Xu -Keerthan Mala -Kelsey Hightower -Kenneth Lim -Kenny Leung -Li Yi -Liu Hua -liuchang0812 -Lloyd Ramey -Louis Kottmann -Luke Carpenter -Marcus Martins -Mary Anthony -Matt Bentley -Matt Duch -Matt Moore -Matt Robenolt -Matthew Green -Michael Prokop -Michal Minar -Michal Minář -Mike Brown -Miquel Sabaté -Misty Stanley-Jones -Misty Stanley-Jones -Morgan Bauer -moxiegirl -Nathan Sullivan -nevermosby -Nghia Tran -Nikita Tarasov -Noah Treuhaft -Nuutti Kotivuori -Oilbeater -Olivier Gambier -Olivier Jacques -Omer Cohen -Patrick Devine -Phil Estes -Philip Misiowiec -Pierre-Yves Ritschard -Qiao Anran -Randy Barlow -Richard Scothern -Rodolfo Carvalho -Rusty Conover -Sean Boran -Sebastiaan van Stijn -Sebastien Coavoux -Serge Dubrouski -Sharif Nassar -Shawn Falkner-Horine -Shreyas Karnik -Simon Thulbourn -spacexnice -Spencer Rinehart -Stan Hu -Stefan Majewsky -Stefan Weil -Stephen J Day -Sungho Moon -Sven Dowideit -Sylvain Baubeau -Ted Reed -tgic -Thomas Sjögren -Tianon Gravi -Tibor Vass -Tonis Tiigi -Tony Holdstock-Brown -Trevor Pounds -Troels Thomsen -Victor Vieux -Victoria Bialas -Vincent Batts -Vincent Demeester -Vincent Giersch -W. 
Trevor King -weiyuan.yl -xg.song -xiekeyang -Yann ROBERT -yaoyao.xyy -yuexiao-wang -yuzou -zhouhaibing089 -姜继忠 diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go index 2d71fc5e9..b3dfb7a6d 100644 --- a/vendor/github.com/docker/distribution/reference/normalize.go +++ b/vendor/github.com/docker/distribution/reference/normalize.go @@ -56,6 +56,35 @@ func ParseNormalizedNamed(s string) (Named, error) { return named, nil } +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + // splitDockerDomain splits a repository name to domain and remotename string. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. 
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index 2f66cca87..8c0c23b2f 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) { var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { + if len(nameMatch) == 3 { repo.domain = nameMatch[1] repo.path = nameMatch[2] } else { diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go index a8b5dbb6f..1d91c7b6c 100644 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -1,5 +1,51 @@ // +build !windows +/* +Package sockets is a simple unix domain socket wrapper. + +Usage + +For example: + + import( + "fmt" + "net" + "os" + "github.com/docker/go-connections/sockets" + ) + + func main() { + l, err := sockets.NewUnixSocketWithOpts("/path/to/sockets", + sockets.WithChown(0,0),sockets.WithChmod(0660)) + if err != nil { + panic(err) + } + echoStr := "hello" + + go func() { + for { + conn, err := l.Accept() + if err != nil { + return + } + conn.Write([]byte(echoStr)) + conn.Close() + } + }() + + conn, err := net.Dial("unix", path) + if err != nil { + t.Fatal(err) + } + + buf := make([]byte, 5) + if _, err := conn.Read(buf); err != nil { + panic(err) + } else if string(buf) != echoStr { + panic(fmt.Errorf("Msg may lost")) + } + } +*/ package sockets import ( @@ -8,8 +54,31 @@ import ( "syscall" ) -// NewUnixSocket creates a unix socket with the specified path and group. 
-func NewUnixSocket(path string, gid int) (net.Listener, error) { +// SockOption sets up socket file's creating option +type SockOption func(string) error + +// WithChown modifies the socket file's uid and gid +func WithChown(uid, gid int) SockOption { + return func(path string) error { + if err := os.Chown(path, uid, gid); err != nil { + return err + } + return nil + } +} + +// WithChmod modifies socket file's access mode +func WithChmod(mask os.FileMode) SockOption { + return func(path string) error { + if err := os.Chmod(path, mask); err != nil { + return err + } + return nil + } +} + +// NewUnixSocketWithOpts creates a unix socket with the specified options +func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error) { if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { return nil, err } @@ -20,13 +89,18 @@ func NewUnixSocket(path string, gid int) (net.Listener, error) { if err != nil { return nil, err } - if err := os.Chown(path, 0, gid); err != nil { - l.Close() - return nil, err - } - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err + + for _, op := range opts { + if err := op(path); err != nil { + l.Close() + return nil, err + } } + return l, nil } + +// NewUnixSocket creates a unix socket with the specified path and group. 
+func NewUnixSocket(path string, gid int) (net.Listener, error) { + return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0660)) +} diff --git a/vendor/github.com/emicklei/go-restful-openapi/CHANGES.md b/vendor/github.com/emicklei/go-restful-openapi/CHANGES.md index e41124a78..c6e5d7b27 100644 --- a/vendor/github.com/emicklei/go-restful-openapi/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful-openapi/CHANGES.md @@ -1,5 +1,11 @@ # changes to the go-restful-openapi package +## v1.0.0 + +- Fix for #19 MapModelTypeNameFunc has incomplete behavior +- prevent array param.Type be overwritten in the else case below (#47) +- Merge paths with existing paths from other webServices (#48) + ## v0.11.0 - Register pointer to array/slice of primitives as such rather than as reference to the primitive type definition. (#46) diff --git a/vendor/github.com/emicklei/go-restful-openapi/README.md b/vendor/github.com/emicklei/go-restful-openapi/README.md index 2e3a73453..cc1283c44 100644 --- a/vendor/github.com/emicklei/go-restful-openapi/README.md +++ b/vendor/github.com/emicklei/go-restful-openapi/README.md @@ -23,4 +23,4 @@ See TestThatExtraTagsAreReadIntoModel for examples. - [go-restful](https://github.com/emicklei/go-restful) - [go-openapi](https://github.com/go-openapi/spec) -© 2017, ernestmicklei.com. MIT License. Contributions welcome. +© 2018, ernestmicklei.com. MIT License. Contributions welcome. 
diff --git a/vendor/github.com/emicklei/go-restful-openapi/build_definitions.go b/vendor/github.com/emicklei/go-restful-openapi/build_definitions.go old mode 100644 new mode 100755 diff --git a/vendor/github.com/emicklei/go-restful-openapi/build_path.go b/vendor/github.com/emicklei/go-restful-openapi/build_path.go old mode 100644 new mode 100755 index 6d0fa8ccc..634e4b75d --- a/vendor/github.com/emicklei/go-restful-openapi/build_path.go +++ b/vendor/github.com/emicklei/go-restful-openapi/build_path.go @@ -129,14 +129,6 @@ func buildParameter(r restful.Route, restfulParam *restful.Parameter, pattern st p := spec.Parameter{} param := restfulParam.Data() p.In = asParamType(param.Kind) - if param.AllowMultiple { - p.Type = "array" - p.Items = spec.NewItems() - p.Items.Type = param.DataType - p.CollectionFormat = param.CollectionFormat - } else { - p.Type = param.DataType - } p.Description = param.Description p.Name = param.Name p.Required = param.Required @@ -149,7 +141,7 @@ func buildParameter(r restful.Route, restfulParam *restful.Parameter, pattern st p.Schema = new(spec.Schema) p.SimpleSchema = spec.SimpleSchema{} if st.Kind() == reflect.Array || st.Kind() == reflect.Slice { - dataTypeName := definitionBuilder{}.keyFrom(st.Elem()) + dataTypeName := keyFrom(st.Elem(), cfg) p.Schema.Type = []string{"array"} p.Schema.Items = &spec.SchemaOrArray{ Schema: &spec.Schema{}, @@ -162,11 +154,19 @@ func buildParameter(r restful.Route, restfulParam *restful.Parameter, pattern st p.Schema.Items.Schema.Ref = spec.MustCreateRef("#/definitions/" + dataTypeName) } } else { - p.Schema.Ref = spec.MustCreateRef("#/definitions/" + param.DataType) + dataTypeName := keyFrom(st, cfg) + p.Schema.Ref = spec.MustCreateRef("#/definitions/" + dataTypeName) } } else { - p.Type = param.DataType + if param.AllowMultiple { + p.Type = "array" + p.Items = spec.NewItems() + p.Items.Type = param.DataType + p.CollectionFormat = param.CollectionFormat + } else { + p.Type = param.DataType + } p.Default = 
stringAutoType(param.DefaultValue) p.Format = param.DataFormat } @@ -185,7 +185,7 @@ func buildResponse(e restful.ResponseError, cfg Config) (r spec.Response) { } r.Schema = new(spec.Schema) if st.Kind() == reflect.Array || st.Kind() == reflect.Slice { - modelName := definitionBuilder{}.keyFrom(st.Elem()) + modelName := keyFrom(st.Elem(), cfg) r.Schema.Type = []string{"array"} r.Schema.Items = &spec.SchemaOrArray{ Schema: &spec.Schema{}, @@ -198,13 +198,13 @@ func buildResponse(e restful.ResponseError, cfg Config) (r spec.Response) { r.Schema.Items.Schema.Ref = spec.MustCreateRef("#/definitions/" + modelName) } } else { - modelName := definitionBuilder{}.keyFrom(st) + modelName := keyFrom(st, cfg) if isPrimitiveType(modelName) { // If the response is a primitive type, then don't reference any definitions. // Instead, set the schema's "type" to the model name. r.Schema.AddType(modelName, "") } else { - modelName := definitionBuilder{}.keyFrom(st) + modelName := keyFrom(st, cfg) r.Schema.Ref = spec.MustCreateRef("#/definitions/" + modelName) } } diff --git a/vendor/github.com/emicklei/go-restful-openapi/definition_builder.go b/vendor/github.com/emicklei/go-restful-openapi/definition_builder.go old mode 100644 new mode 100755 index e4a4d26d2..c89c51b7d --- a/vendor/github.com/emicklei/go-restful-openapi/definition_builder.go +++ b/vendor/github.com/emicklei/go-restful-openapi/definition_builder.go @@ -40,7 +40,7 @@ func (b definitionBuilder) addModel(st reflect.Type, nameOverride string) *spec. st = st.Elem() } - modelName := b.keyFrom(st) + modelName := keyFrom(st, b.Config) if nameOverride != "" { modelName = nameOverride } @@ -165,7 +165,7 @@ func (b definitionBuilder) buildProperty(field reflect.StructField, model *spec. 
prop.Type = []string{pType} } if prop.Format == "" { - prop.Format = b.jsonSchemaFormat(b.keyFrom(fieldType)) + prop.Format = b.jsonSchemaFormat(keyFrom(fieldType, b.Config)) } return jsonName, modelDescription, prop } @@ -200,17 +200,18 @@ func (b definitionBuilder) buildProperty(field reflect.StructField, model *spec. return jsonName, modelDescription, prop } - fieldTypeName := b.keyFrom(fieldType) + fieldTypeName := keyFrom(fieldType, b.Config) if b.isPrimitiveType(fieldTypeName) { mapped := b.jsonSchemaType(fieldTypeName) prop.Type = []string{mapped} prop.Format = b.jsonSchemaFormat(fieldTypeName) return jsonName, modelDescription, prop } - modelType := b.keyFrom(fieldType) + modelType := keyFrom(fieldType, b.Config) prop.Ref = spec.MustCreateRef("#/definitions/" + modelType) if fieldType.Name() == "" { // override type of anonymous structs + // FIXME: Still need a way to handle anonymous struct model naming. nestedTypeName := modelName + "." + jsonName prop.Ref = spec.MustCreateRef("#/definitions/" + nestedTypeName) b.addModel(fieldType, nestedTypeName) @@ -237,6 +238,7 @@ func (b definitionBuilder) buildStructTypeProperty(field reflect.StructField, js // check for anonymous if len(fieldType.Name()) == 0 { // anonymous + // FIXME: Still need a way to handle anonymous struct model naming. anonType := model.ID + "." 
+ jsonName b.addModel(fieldType, anonType) prop.Ref = spec.MustCreateRef("#/definitions/" + anonType) @@ -247,7 +249,7 @@ func (b definitionBuilder) buildStructTypeProperty(field reflect.StructField, js // embedded struct sub := definitionBuilder{make(spec.Definitions), b.Config} sub.addModel(fieldType, "") - subKey := sub.keyFrom(fieldType) + subKey := keyFrom(fieldType, b.Config) // merge properties from sub subModel, _ := sub.Definitions[subKey] for k, v := range subModel.Properties { @@ -277,7 +279,7 @@ func (b definitionBuilder) buildStructTypeProperty(field reflect.StructField, js } // simple struct b.addModel(fieldType, "") - var pType = b.keyFrom(fieldType) + var pType = keyFrom(fieldType, b.Config) prop.Ref = spec.MustCreateRef("#/definitions/" + pType) return jsonName, prop } @@ -370,7 +372,7 @@ func (b definitionBuilder) buildPointerTypeProperty(field reflect.StructField, j } } else { // non-array, pointer type - fieldTypeName := b.keyFrom(fieldType.Elem()) + fieldTypeName := keyFrom(fieldType.Elem(), b.Config) var pType = b.jsonSchemaType(fieldTypeName) // no star, include pkg path if b.isPrimitiveType(fieldTypeName) { prop.Type = []string{pType} @@ -395,13 +397,13 @@ func (b definitionBuilder) getElementTypeName(modelName, jsonName string, t refl if t.Name() == "" { return modelName + "." 
+ jsonName } - return b.keyFrom(t) + return keyFrom(t, b.Config) } -func (b definitionBuilder) keyFrom(st reflect.Type) string { +func keyFrom(st reflect.Type, cfg Config) string { key := st.String() - if b.Config.ModelTypeNameHandler != nil { - if name, ok := b.Config.ModelTypeNameHandler(st); ok { + if cfg.ModelTypeNameHandler != nil { + if name, ok := cfg.ModelTypeNameHandler(st); ok { key = name } } diff --git a/vendor/github.com/emicklei/go-restful-openapi/spec_resource.go b/vendor/github.com/emicklei/go-restful-openapi/spec_resource.go old mode 100644 new mode 100755 index 1e62d64f5..b36274061 --- a/vendor/github.com/emicklei/go-restful-openapi/spec_resource.go +++ b/vendor/github.com/emicklei/go-restful-openapi/spec_resource.go @@ -30,6 +30,13 @@ func BuildSwagger(config Config) *spec.Swagger { for _, each := range config.WebServices { for path, item := range buildPaths(each, config).Paths { + existingPathItem, ok := paths.Paths[path] + if ok { + for _, r := range each.Routes() { + _, patterns := sanitizePath(r.Path) + item = buildPathItem(each, r, existingPathItem, patterns, config) + } + } paths.Paths[path] = item } for name, def := range buildDefinitions(each, config) { diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md index 7294637cd..5597fa1cc 100644 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/CHANGES.md @@ -1,5 +1,17 @@ Change history of go-restful = + +v2.9.0 +- add per Route content encoding setting (overrides container setting) + +v2.8.0 +- add Request.QueryParameters() +- add json-iterator (via build tag) +- disable vgo module (until log is moved) + +v2.7.1 +- add vgo module + v2.6.1 - add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md index 65c3d4f9d..f52c25acf 100644 --- 
a/vendor/github.com/emicklei/go-restful/README.md +++ b/vendor/github.com/emicklei/go-restful/README.md @@ -71,6 +71,7 @@ There are several hooks to customize the behavior of the go-restful package. - Trace logging - Compression - Encoders for other serializers +- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .` TODO: write examples of these. diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go index b4ad153e8..061a8d718 100644 --- a/vendor/github.com/emicklei/go-restful/container.go +++ b/vendor/github.com/emicklei/go-restful/container.go @@ -97,7 +97,7 @@ func (c *Container) Add(service *WebService) *Container { // cannot have duplicate root paths for _, each := range c.webServices { if each.RootPath() == service.RootPath() { - log.Printf("[restful] WebService with duplicate root path detected:['%v']", each) + log.Printf("WebService with duplicate root path detected:['%v']", each) os.Exit(1) } } @@ -139,7 +139,7 @@ func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) boo func (c *Container) Remove(ws *WebService) error { if c.ServeMux == http.DefaultServeMux { - errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) + errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) log.Print(errMsg) return errors.New(errMsg) } @@ -168,7 +168,7 @@ func (c *Container) Remove(ws *WebService) error { // This may be a security issue as it exposes sourcecode information. 
func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) { var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason)) + buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason)) for i := 2; ; i += 1 { _, file, line, ok := runtime.Caller(i) if !ok { @@ -220,20 +220,6 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R }() } - // Detect if compression is needed - // assume without compression, test for override - if c.contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest) - if doCompress { - var err error - writer, err = NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - log.Print("[restful] unable to install compressor: ", err) - httpWriter.WriteHeader(http.StatusInternalServerError) - return - } - } - } // Find best match Route ; err is non nil if no match was found var webService *WebService var route *Route @@ -245,6 +231,26 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R c.webServices, httpRequest) }() + + // Detect if compression is needed + // assume without compression, test for override + contentEncodingEnabled := c.contentEncodingEnabled + if route != nil && route.contentEncodingEnabled != nil { + contentEncodingEnabled = *route.contentEncodingEnabled + } + if contentEncodingEnabled { + doCompress, encoding := wantsCompressedResponse(httpRequest) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + } + if err != nil { // a non-200 response has already been written // run container filters anyway ; they should not touch the response... 
diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go index 42957055f..66dfc824f 100644 --- a/vendor/github.com/emicklei/go-restful/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/entity_accessors.go @@ -5,9 +5,7 @@ package restful // that can be found in the LICENSE file. import ( - "encoding/json" "encoding/xml" - "io" "strings" "sync" ) @@ -127,16 +125,11 @@ type entityJSONAccess struct { ContentType string } -// JSONNewDecoderFunc can be used to inject a different configration for the json Decoder instance. -var JSONNewDecoderFunc = func(r io.Reader) *json.Decoder { - decoder := json.NewDecoder(r) - decoder.UseNumber() - return decoder -} - // Read unmarshalls the value from JSON func (e entityJSONAccess) Read(req *Request, v interface{}) error { - return JSONNewDecoderFunc(req.Request.Body).Decode(v) + decoder := NewDecoder(req.Request.Body) + decoder.UseNumber() + return decoder.Decode(v) } // Write marshalls the value to JSON and set the Content-Type Header. 
@@ -153,7 +146,7 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er } if resp.prettyPrint { // pretty output must be created and written explicitly - output, err := json.MarshalIndent(v, " ", " ") + output, err := MarshalIndent(v, "", " ") if err != nil { return err } @@ -165,5 +158,5 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er // not-so-pretty resp.Header().Set(HEADER_ContentType, contentType) resp.WriteHeader(status) - return json.NewEncoder(resp).Encode(v) + return NewEncoder(resp).Encode(v) } diff --git a/vendor/github.com/emicklei/go-restful/go.mod b/vendor/github.com/emicklei/go-restful/go.mod deleted file mode 100644 index 5fa37e774..000000000 --- a/vendor/github.com/emicklei/go-restful/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/emicklei/go-restful/v2 diff --git a/vendor/github.com/emicklei/go-restful/json.go b/vendor/github.com/emicklei/go-restful/json.go new file mode 100644 index 000000000..871165166 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/json.go @@ -0,0 +1,11 @@ +// +build !jsoniter + +package restful + +import "encoding/json" + +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/emicklei/go-restful/jsoniter.go b/vendor/github.com/emicklei/go-restful/jsoniter.go new file mode 100644 index 000000000..11b8f8ae7 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/jsoniter.go @@ -0,0 +1,12 @@ +// +build jsoniter + +package restful + +import "github.com/json-iterator/go" + +var ( + json = jsoniter.ConfigCompatibleWithStandardLibrary + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go index 4360b492e..bc303ef70 100644 --- a/vendor/github.com/emicklei/go-restful/jsr311.go +++ 
b/vendor/github.com/emicklei/go-restful/jsr311.go @@ -99,11 +99,10 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R } return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed") } - inputMediaOk := methodOk // content-type contentType := httpRequest.Header.Get(HEADER_ContentType) - inputMediaOk = []Route{} + inputMediaOk := []Route{} for _, each := range methodOk { if each.matchesContentType(contentType) { inputMediaOk = append(inputMediaOk, each) diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go index 8c23af12c..a20730feb 100644 --- a/vendor/github.com/emicklei/go-restful/request.go +++ b/vendor/github.com/emicklei/go-restful/request.go @@ -51,6 +51,11 @@ func (r *Request) QueryParameter(name string) string { return r.Request.FormValue(name) } +// QueryParameters returns the all the query parameters values by name +func (r *Request) QueryParameters(name string) []string { + return r.Request.URL.Query()[name] +} + // BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error. func (r *Request) BodyParameter(name string) (string, error) { err := r.Request.ParseForm() diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go index f72bf9850..592638ab6 100644 --- a/vendor/github.com/emicklei/go-restful/route.go +++ b/vendor/github.com/emicklei/go-restful/route.go @@ -45,6 +45,9 @@ type Route struct { // marks a route as deprecated Deprecated bool + + //Overrides the container.contentEncodingEnabled + contentEncodingEnabled *bool } // Initialize for Route @@ -147,3 +150,8 @@ func tokenizePath(path string) []string { func (r Route) String() string { return r.Method + " " + r.Path } + +// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value. 
+func (r Route) EnableContentEncoding(enabled bool) { + r.contentEncodingEnabled = &enabled +} diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go index 4ebecbd8c..a4e74d10b 100644 --- a/vendor/github.com/emicklei/go-restful/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/route_builder.go @@ -261,11 +261,11 @@ func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBui func (b *RouteBuilder) Build() Route { pathExpr, err := newPathExpression(b.currentPath) if err != nil { - log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err) + log.Printf("Invalid path:%s because:%v", b.currentPath, err) os.Exit(1) } if b.function == nil { - log.Printf("[restful] No function specified for route:" + b.currentPath) + log.Printf("No function specified for route:" + b.currentPath) os.Exit(1) } operationName := b.operation diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go index f7e18a585..77ba9a8cf 100644 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ b/vendor/github.com/emicklei/go-restful/web_service.go @@ -60,7 +60,7 @@ func reflectTypeName(sample interface{}) string { func (w *WebService) compilePathExpression() { compiled, err := newPathExpression(w.rootPath) if err != nil { - log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err) + log.Printf("invalid path:%s because:%v", w.rootPath, err) os.Exit(1) } w.pathExpr = compiled diff --git a/vendor/github.com/go-ldap/ldap/.gitignore b/vendor/github.com/go-ldap/ldap/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/go-ldap/ldap/.travis.yml b/vendor/github.com/go-ldap/ldap/.travis.yml new file mode 100644 index 000000000..d2160fd4a --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/.travis.yml @@ -0,0 +1,32 @@ +sudo: false +language: go +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + 
- "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - tip + +git: + depth: 1 + +matrix: + fast_finish: true + allow_failures: + - go: tip +go_import_path: gopkg.in/ldap.v3 +install: + - go get gopkg.in/asn1-ber.v1 + - go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover + - go get github.com/golang/lint/golint || go get golang.org/x/lint/golint || true + - go build -v ./... +script: + - make test + - make fmt + - make vet + - make lint diff --git a/vendor/github.com/go-ldap/ldap/CONTRIBUTING.md b/vendor/github.com/go-ldap/ldap/CONTRIBUTING.md new file mode 100644 index 000000000..a7885231c --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/CONTRIBUTING.md @@ -0,0 +1,12 @@ +# Contribution Guidelines + +We welcome contribution and improvements. + +## Guiding Principles + +To begin with here is a draft from an email exchange: + + * take compatibility seriously (our semvers, compatibility with older go versions, etc) + * don't tag untested code for release + * beware of baking in implicit behavior based on other libraries/tools choices + * be as high-fidelity as possible in plumbing through LDAP data (don't mask errors or reduce power of someone using the library) diff --git a/vendor/github.com/go-ldap/ldap/LICENSE b/vendor/github.com/go-ldap/ldap/LICENSE new file mode 100644 index 000000000..6c0ed4b38 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-ldap Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to 
the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-ldap/ldap/Makefile b/vendor/github.com/go-ldap/ldap/Makefile new file mode 100644 index 000000000..c49664722 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/Makefile @@ -0,0 +1,82 @@ +.PHONY: default install build test quicktest fmt vet lint + +# List of all release tags "supported" by our current Go version +# E.g. ":go1.1:go1.2:go1.3:go1.4:go1.5:go1.6:go1.7:go1.8:go1.9:go1.10:go1.11:go1.12:" +GO_RELEASE_TAGS := $(shell go list -f ':{{join (context.ReleaseTags) ":"}}:' runtime) + +# Only use the `-race` flag on newer versions of Go (version 1.3 and newer) +ifeq (,$(findstring :go1.3:,$(GO_RELEASE_TAGS))) + RACE_FLAG := +else + RACE_FLAG := -race -cpu 1,2,4 +endif + +# Run `go vet` on Go 1.12 and newer. For Go 1.5-1.11, use `go tool vet` +ifneq (,$(findstring :go1.12:,$(GO_RELEASE_TAGS))) + GO_VET := go vet \ + -atomic \ + -bool \ + -copylocks \ + -nilfunc \ + -printf \ + -rangeloops \ + -unreachable \ + -unsafeptr \ + -unusedresult \ + . +else ifneq (,$(findstring :go1.5:,$(GO_RELEASE_TAGS))) + GO_VET := go tool vet \ + -atomic \ + -bool \ + -copylocks \ + -nilfunc \ + -printf \ + -shadow \ + -rangeloops \ + -unreachable \ + -unsafeptr \ + -unusedresult \ + . 
+else + GO_VET := @echo "go vet skipped -- not supported on this version of Go" +endif + +default: fmt vet lint build quicktest + +install: + go get -t -v ./... + +build: + go build -v ./... + +test: + go test -v $(RACE_FLAG) -cover ./... + +quicktest: + go test ./... + +# Capture output and force failure when there is non-empty output +fmt: + @echo gofmt -l . + @OUTPUT=`gofmt -l . 2>&1`; \ + if [ "$$OUTPUT" ]; then \ + echo "gofmt must be run on the following files:"; \ + echo "$$OUTPUT"; \ + exit 1; \ + fi + +vet: + $(GO_VET) + +# https://github.com/golang/lint +# go get github.com/golang/lint/golint +# Capture output and force failure when there is non-empty output +# Only run on go1.5+ +lint: + @echo golint ./... + @OUTPUT=`command -v golint >/dev/null 2>&1 && golint ./... 2>&1`; \ + if [ "$$OUTPUT" ]; then \ + echo "golint errors:"; \ + echo "$$OUTPUT"; \ + exit 1; \ + fi diff --git a/vendor/github.com/go-ldap/ldap/README.md b/vendor/github.com/go-ldap/ldap/README.md new file mode 100644 index 000000000..25cf730b4 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/README.md @@ -0,0 +1,54 @@ +[![GoDoc](https://godoc.org/gopkg.in/ldap.v3?status.svg)](https://godoc.org/gopkg.in/ldap.v3) +[![Build Status](https://travis-ci.org/go-ldap/ldap.svg)](https://travis-ci.org/go-ldap/ldap) + +# Basic LDAP v3 functionality for the GO programming language. + +## Install + +For the latest version use: + + go get gopkg.in/ldap.v3 + +Import the latest version with: + + import "gopkg.in/ldap.v3" + +## Required Libraries: + + - gopkg.in/asn1-ber.v1 + +## Features: + + - Connecting to LDAP server (non-TLS, TLS, STARTTLS) + - Binding to LDAP server + - Searching for entries + - Filter Compile / Decompile + - Paging Search Results + - Modify Requests / Responses + - Add Requests / Responses + - Delete Requests / Responses + - Modify DN Requests / Responses + +## Examples: + + - search + - modify + +## Contributing: + +Bug reports and pull requests are welcome! 
+ +Before submitting a pull request, please make sure tests and verification scripts pass: +``` +make all +``` + +To set up a pre-push hook to run the tests and verify scripts before pushing: +``` +ln -s ../../.githooks/pre-push .git/hooks/pre-push +``` + +--- +The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/) +The design is licensed under the Creative Commons 3.0 Attributions license. +Read this article for more details: http://blog.golang.org/gopher diff --git a/vendor/github.com/go-ldap/ldap/add.go b/vendor/github.com/go-ldap/ldap/add.go new file mode 100644 index 000000000..19bce1b75 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/add.go @@ -0,0 +1,119 @@ +// +// https://tools.ietf.org/html/rfc4511 +// +// AddRequest ::= [APPLICATION 8] SEQUENCE { +// entry LDAPDN, +// attributes AttributeList } +// +// AttributeList ::= SEQUENCE OF attribute Attribute + +package ldap + +import ( + "errors" + "log" + + "gopkg.in/asn1-ber.v1" +) + +// Attribute represents an LDAP attribute +type Attribute struct { + // Type is the name of the LDAP attribute + Type string + // Vals are the LDAP attribute values + Vals []string +} + +func (a *Attribute) encode() *ber.Packet { + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute") + seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type")) + set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue") + for _, value := range a.Vals { + set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals")) + } + seq.AppendChild(set) + return seq +} + +// AddRequest represents an LDAP AddRequest operation +type AddRequest struct { + // DN identifies the entry being added + DN string + // Attributes list the attributes of the new entry + Attributes []Attribute + // Controls hold optional controls to send with the request + Controls []Control +} + 
+func (a AddRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.DN, "DN")) + attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") + for _, attribute := range a.Attributes { + attributes.AppendChild(attribute.encode()) + } + request.AppendChild(attributes) + return request +} + +// Attribute adds an attribute with the given type and values +func (a *AddRequest) Attribute(attrType string, attrVals []string) { + a.Attributes = append(a.Attributes, Attribute{Type: attrType, Vals: attrVals}) +} + +// NewAddRequest returns an AddRequest for the given DN, with no attributes +func NewAddRequest(dn string, controls []Control) *AddRequest { + return &AddRequest{ + DN: dn, + Controls: controls, + } + +} + +// Add performs the given AddRequest +func (l *Conn) Add(addRequest *AddRequest) error { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + packet.AppendChild(addRequest.encode()) + if len(addRequest.Controls) > 0 { + packet.AppendChild(encodeControls(addRequest.Controls)) + } + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + } + + if 
packet.Children[1].Tag == ApplicationAddResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + + l.Debug.Printf("%d: returning", msgCtx.id) + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/bind.go b/vendor/github.com/go-ldap/ldap/bind.go new file mode 100644 index 000000000..59c3f5ef5 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/bind.go @@ -0,0 +1,135 @@ +package ldap + +import ( + "errors" + "fmt" + + "gopkg.in/asn1-ber.v1" +) + +// SimpleBindRequest represents a username/password bind operation +type SimpleBindRequest struct { + // Username is the name of the Directory object that the client wishes to bind as + Username string + // Password is the credentials to bind with + Password string + // Controls are optional controls to send with the bind request + Controls []Control + // AllowEmptyPassword sets whether the client allows binding with an empty password + // (normally used for unauthenticated bind). 
+ AllowEmptyPassword bool +} + +// SimpleBindResult contains the response from the server +type SimpleBindResult struct { + Controls []Control +} + +// NewSimpleBindRequest returns a bind request +func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest { + return &SimpleBindRequest{ + Username: username, + Password: password, + Controls: controls, + AllowEmptyPassword: false, + } +} + +func (bindRequest *SimpleBindRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, bindRequest.Username, "User Name")) + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, bindRequest.Password, "Password")) + + return request +} + +// SimpleBind performs the simple bind operation defined in the given request +func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) { + if simpleBindRequest.Password == "" && !simpleBindRequest.AllowEmptyPassword { + return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client")) + } + + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + encodedBindRequest := simpleBindRequest.encode() + packet.AppendChild(encodedBindRequest) + if len(simpleBindRequest.Controls) > 0 { + packet.AppendChild(encodeControls(simpleBindRequest.Controls)) + } + + if l.Debug { + ber.PrintPacket(packet) + } + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + packetResponse, ok := <-msgCtx.responses + if !ok { + return 
nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if l.Debug { + if err = addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + + result := &SimpleBindResult{ + Controls: make([]Control, 0), + } + + if len(packet.Children) == 3 { + for _, child := range packet.Children[2].Children { + decodedChild, decodeErr := DecodeControl(child) + if decodeErr != nil { + return nil, fmt.Errorf("failed to decode child control: %s", decodeErr) + } + result.Controls = append(result.Controls, decodedChild) + } + } + + err = GetLDAPError(packet) + return result, err +} + +// Bind performs a bind with the given username and password. +// +// It does not allow unauthenticated bind (i.e. empty password). Use the UnauthenticatedBind method +// for that. +func (l *Conn) Bind(username, password string) error { + req := &SimpleBindRequest{ + Username: username, + Password: password, + AllowEmptyPassword: false, + } + _, err := l.SimpleBind(req) + return err +} + +// UnauthenticatedBind performs an unauthenticated bind. +// +// A username may be provided for trace (e.g. logging) purpose only, but it is normally not +// authenticated or otherwise validated by the LDAP server. +// +// See https://tools.ietf.org/html/rfc4513#section-5.1.2 . +// See https://tools.ietf.org/html/rfc4513#section-6.3.1 . 
+func (l *Conn) UnauthenticatedBind(username string) error { + req := &SimpleBindRequest{ + Username: username, + Password: "", + AllowEmptyPassword: true, + } + _, err := l.SimpleBind(req) + return err +} diff --git a/vendor/github.com/go-ldap/ldap/client.go b/vendor/github.com/go-ldap/ldap/client.go new file mode 100644 index 000000000..c7f41f6f9 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/client.go @@ -0,0 +1,28 @@ +package ldap + +import ( + "crypto/tls" + "time" +) + +// Client knows how to interact with an LDAP server +type Client interface { + Start() + StartTLS(config *tls.Config) error + Close() + SetTimeout(time.Duration) + + Bind(username, password string) error + SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) + + Add(addRequest *AddRequest) error + Del(delRequest *DelRequest) error + Modify(modifyRequest *ModifyRequest) error + ModifyDN(modifyDNRequest *ModifyDNRequest) error + + Compare(dn, attribute, value string) (bool, error) + PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) + + Search(searchRequest *SearchRequest) (*SearchResult, error) + SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) +} diff --git a/vendor/github.com/go-ldap/ldap/compare.go b/vendor/github.com/go-ldap/ldap/compare.go new file mode 100644 index 000000000..5b5013cbe --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/compare.go @@ -0,0 +1,83 @@ +// File contains Compare functionality +// +// https://tools.ietf.org/html/rfc4511 +// +// CompareRequest ::= [APPLICATION 14] SEQUENCE { +// entry LDAPDN, +// ava AttributeValueAssertion } +// +// AttributeValueAssertion ::= SEQUENCE { +// attributeDesc AttributeDescription, +// assertionValue AssertionValue } +// +// AttributeDescription ::= LDAPString +// -- Constrained to +// -- [RFC4512] +// +// AttributeValue ::= OCTET STRING +// + +package ldap + +import ( + "errors" + "fmt" + + "gopkg.in/asn1-ber.v1" +) + +// 
Compare checks to see if the attribute of the dn matches value. Returns true if it does otherwise +// false with any error that occurs if any. +func (l *Conn) Compare(dn, attribute, value string) (bool, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, dn, "DN")) + + ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion") + ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "AttributeDesc")) + ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "AssertionValue")) + request.AppendChild(ava) + packet.AppendChild(request) + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return false, err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return false, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return false, err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return false, err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationCompareResponse { + err := GetLDAPError(packet) + + switch { + case IsErrorWithCode(err, LDAPResultCompareTrue): + return true, nil + case IsErrorWithCode(err, LDAPResultCompareFalse): + return false, nil + default: + return false, err + } + } + return false, 
fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag) +} diff --git a/vendor/github.com/go-ldap/ldap/conn.go b/vendor/github.com/go-ldap/ldap/conn.go new file mode 100644 index 000000000..c20471fc2 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/conn.go @@ -0,0 +1,516 @@ +package ldap + +import ( + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "net/url" + "sync" + "sync/atomic" + "time" + + "gopkg.in/asn1-ber.v1" +) + +const ( + // MessageQuit causes the processMessages loop to exit + MessageQuit = 0 + // MessageRequest sends a request to the server + MessageRequest = 1 + // MessageResponse receives a response from the server + MessageResponse = 2 + // MessageFinish indicates the client considers a particular message ID to be finished + MessageFinish = 3 + // MessageTimeout indicates the client-specified timeout for a particular message ID has been reached + MessageTimeout = 4 +) + +const ( + // DefaultLdapPort default ldap port for pure TCP connection + DefaultLdapPort = "389" + // DefaultLdapsPort default ldap port for SSL connection + DefaultLdapsPort = "636" +) + +// PacketResponse contains the packet or error encountered reading a response +type PacketResponse struct { + // Packet is the packet read from the server + Packet *ber.Packet + // Error is an error encountered while reading + Error error +} + +// ReadPacket returns the packet or an error +func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) { + if (pr == nil) || (pr.Packet == nil && pr.Error == nil) { + return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response")) + } + return pr.Packet, pr.Error +} + +type messageContext struct { + id int64 + // close(done) should only be called from finishMessage() + done chan struct{} + // close(responses) should only be called from processMessages(), and only sent to from sendResponse() + responses chan *PacketResponse +} + +// sendResponse should only be called within the processMessages() loop which +// is also 
responsible for closing the responses channel. +func (msgCtx *messageContext) sendResponse(packet *PacketResponse) { + select { + case msgCtx.responses <- packet: + // Successfully sent packet to message handler. + case <-msgCtx.done: + // The request handler is done and will not receive more + // packets. + } +} + +type messagePacket struct { + Op int + MessageID int64 + Packet *ber.Packet + Context *messageContext +} + +type sendMessageFlags uint + +const ( + startTLS sendMessageFlags = 1 << iota +) + +// Conn represents an LDAP Connection +type Conn struct { + // requestTimeout is loaded atomically + // so we need to ensure 64-bit alignment on 32-bit platforms. + requestTimeout int64 + conn net.Conn + isTLS bool + closing uint32 + closeErr atomic.Value + isStartingTLS bool + Debug debugging + chanConfirm chan struct{} + messageContexts map[int64]*messageContext + chanMessage chan *messagePacket + chanMessageID chan int64 + wgClose sync.WaitGroup + outstandingRequests uint + messageMutex sync.Mutex +} + +var _ Client = &Conn{} + +// DefaultTimeout is a package-level variable that sets the timeout value +// used for the Dial and DialTLS methods. +// +// WARNING: since this is a package-level variable, setting this value from +// multiple places will probably result in undesired behaviour. +var DefaultTimeout = 60 * time.Second + +// Dial connects to the given address on the given network using net.Dial +// and then returns a new Conn for the connection. +func Dial(network, addr string) (*Conn, error) { + c, err := net.DialTimeout(network, addr, DefaultTimeout) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + conn := NewConn(c, false) + conn.Start() + return conn, nil +} + +// DialTLS connects to the given address on the given network using tls.Dial +// and then returns a new Conn for the connection. 
+func DialTLS(network, addr string, config *tls.Config) (*Conn, error) { + c, err := tls.DialWithDialer(&net.Dialer{Timeout: DefaultTimeout}, network, addr, config) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + conn := NewConn(c, true) + conn.Start() + return conn, nil +} + +// DialURL connects to the given ldap URL vie TCP using tls.Dial or net.Dial if ldaps:// +// or ldap:// specified as protocol. On success a new Conn for the connection +// is returned. +func DialURL(addr string) (*Conn, error) { + + lurl, err := url.Parse(addr) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + + host, port, err := net.SplitHostPort(lurl.Host) + if err != nil { + // we asume that error is due to missing port + host = lurl.Host + port = "" + } + + switch lurl.Scheme { + case "ldap": + if port == "" { + port = DefaultLdapPort + } + return Dial("tcp", net.JoinHostPort(host, port)) + case "ldaps": + if port == "" { + port = DefaultLdapsPort + } + tlsConf := &tls.Config{ + ServerName: host, + } + return DialTLS("tcp", net.JoinHostPort(host, port), tlsConf) + } + + return nil, NewError(ErrorNetwork, fmt.Errorf("Unknown scheme '%s'", lurl.Scheme)) +} + +// NewConn returns a new Conn using conn for network I/O. +func NewConn(conn net.Conn, isTLS bool) *Conn { + return &Conn{ + conn: conn, + chanConfirm: make(chan struct{}), + chanMessageID: make(chan int64), + chanMessage: make(chan *messagePacket, 10), + messageContexts: map[int64]*messageContext{}, + requestTimeout: 0, + isTLS: isTLS, + } +} + +// Start initializes goroutines to read responses and process messages +func (l *Conn) Start() { + go l.reader() + go l.processMessages() + l.wgClose.Add(1) +} + +// IsClosing returns whether or not we're currently closing. 
+func (l *Conn) IsClosing() bool { + return atomic.LoadUint32(&l.closing) == 1 +} + +// setClosing sets the closing value to true +func (l *Conn) setClosing() bool { + return atomic.CompareAndSwapUint32(&l.closing, 0, 1) +} + +// Close closes the connection. +func (l *Conn) Close() { + l.messageMutex.Lock() + defer l.messageMutex.Unlock() + + if l.setClosing() { + l.Debug.Printf("Sending quit message and waiting for confirmation") + l.chanMessage <- &messagePacket{Op: MessageQuit} + <-l.chanConfirm + close(l.chanMessage) + + l.Debug.Printf("Closing network connection") + if err := l.conn.Close(); err != nil { + log.Println(err) + } + + l.wgClose.Done() + } + l.wgClose.Wait() +} + +// SetTimeout sets the time after a request is sent that a MessageTimeout triggers +func (l *Conn) SetTimeout(timeout time.Duration) { + if timeout > 0 { + atomic.StoreInt64(&l.requestTimeout, int64(timeout)) + } +} + +// Returns the next available messageID +func (l *Conn) nextMessageID() int64 { + if messageID, ok := <-l.chanMessageID; ok { + return messageID + } + return 0 +} + +// StartTLS sends the command to start a TLS session and then creates a new TLS Client +func (l *Conn) StartTLS(config *tls.Config) error { + if l.isTLS { + return NewError(ErrorNetwork, errors.New("ldap: already encrypted")) + } + + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS") + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command")) + packet.AppendChild(request) + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessageWithFlags(packet, startTLS) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for 
response", msgCtx.id) + + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + l.Close() + return err + } + ber.PrintPacket(packet) + } + + if err := GetLDAPError(packet); err == nil { + conn := tls.Client(l.conn, config) + + if connErr := conn.Handshake(); connErr != nil { + l.Close() + return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", connErr)) + } + + l.isTLS = true + l.conn = conn + } else { + return err + } + go l.reader() + + return nil +} + +// TLSConnectionState returns the client's TLS connection state. +// The return values are their zero values if StartTLS did +// not succeed. +func (l *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) { + tc, ok := l.conn.(*tls.Conn) + if !ok { + return + } + return tc.ConnectionState(), true +} + +func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) { + return l.sendMessageWithFlags(packet, 0) +} + +func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) { + if l.IsClosing() { + return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed")) + } + l.messageMutex.Lock() + l.Debug.Printf("flags&startTLS = %d", flags&startTLS) + if l.isStartingTLS { + l.messageMutex.Unlock() + return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase")) + } + if flags&startTLS != 0 { + if l.outstandingRequests != 0 { + l.messageMutex.Unlock() + return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests")) + } + l.isStartingTLS = true + } + l.outstandingRequests++ + + l.messageMutex.Unlock() + + responses := make(chan *PacketResponse) + messageID := packet.Children[0].Value.(int64) + 
message := &messagePacket{ + Op: MessageRequest, + MessageID: messageID, + Packet: packet, + Context: &messageContext{ + id: messageID, + done: make(chan struct{}), + responses: responses, + }, + } + l.sendProcessMessage(message) + return message.Context, nil +} + +func (l *Conn) finishMessage(msgCtx *messageContext) { + close(msgCtx.done) + + if l.IsClosing() { + return + } + + l.messageMutex.Lock() + l.outstandingRequests-- + if l.isStartingTLS { + l.isStartingTLS = false + } + l.messageMutex.Unlock() + + message := &messagePacket{ + Op: MessageFinish, + MessageID: msgCtx.id, + } + l.sendProcessMessage(message) +} + +func (l *Conn) sendProcessMessage(message *messagePacket) bool { + l.messageMutex.Lock() + defer l.messageMutex.Unlock() + if l.IsClosing() { + return false + } + l.chanMessage <- message + return true +} + +func (l *Conn) processMessages() { + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in processMessages: %v", err) + } + for messageID, msgCtx := range l.messageContexts { + // If we are closing due to an error, inform anyone who + // is waiting about the error. 
+ if l.IsClosing() && l.closeErr.Load() != nil { + msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)}) + } + l.Debug.Printf("Closing channel for MessageID %d", messageID) + close(msgCtx.responses) + delete(l.messageContexts, messageID) + } + close(l.chanMessageID) + close(l.chanConfirm) + }() + + var messageID int64 = 1 + for { + select { + case l.chanMessageID <- messageID: + messageID++ + case message := <-l.chanMessage: + switch message.Op { + case MessageQuit: + l.Debug.Printf("Shutting down - quit message received") + return + case MessageRequest: + // Add to message list and write to network + l.Debug.Printf("Sending message %d", message.MessageID) + + buf := message.Packet.Bytes() + _, err := l.conn.Write(buf) + if err != nil { + l.Debug.Printf("Error Sending Message: %s", err.Error()) + message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)}) + close(message.Context.responses) + break + } + + // Only add to messageContexts if we were able to + // successfully write the message. 
+ l.messageContexts[message.MessageID] = message.Context + + // Add timeout if defined + requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout)) + if requestTimeout > 0 { + go func() { + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in RequestTimeout: %v", err) + } + }() + time.Sleep(requestTimeout) + timeoutMessage := &messagePacket{ + Op: MessageTimeout, + MessageID: message.MessageID, + } + l.sendProcessMessage(timeoutMessage) + }() + } + case MessageResponse: + l.Debug.Printf("Receiving message %d", message.MessageID) + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + msgCtx.sendResponse(&PacketResponse{message.Packet, nil}) + } else { + log.Printf("Received unexpected message %d, %v", message.MessageID, l.IsClosing()) + ber.PrintPacket(message.Packet) + } + case MessageTimeout: + // Handle the timeout by closing the channel + // All reads will return immediately + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + l.Debug.Printf("Receiving message timeout for %d", message.MessageID) + msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")}) + delete(l.messageContexts, message.MessageID) + close(msgCtx.responses) + } + case MessageFinish: + l.Debug.Printf("Finished message %d", message.MessageID) + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + delete(l.messageContexts, message.MessageID) + close(msgCtx.responses) + } + } + } + } +} + +func (l *Conn) reader() { + cleanstop := false + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in reader: %v", err) + } + if !cleanstop { + l.Close() + } + }() + + for { + if cleanstop { + l.Debug.Printf("reader clean stopping (without closing the connection)") + return + } + packet, err := ber.ReadPacket(l.conn) + if err != nil { + // A read error is expected here if we are closing the connection... 
+ if !l.IsClosing() { + l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err)) + l.Debug.Printf("reader error: %s", err.Error()) + } + return + } + addLDAPDescriptions(packet) + if len(packet.Children) == 0 { + l.Debug.Printf("Received bad ldap packet") + continue + } + l.messageMutex.Lock() + if l.isStartingTLS { + cleanstop = true + } + l.messageMutex.Unlock() + message := &messagePacket{ + Op: MessageResponse, + MessageID: packet.Children[0].Value.(int64), + Packet: packet, + } + if !l.sendProcessMessage(message) { + return + } + } +} diff --git a/vendor/github.com/go-ldap/ldap/control.go b/vendor/github.com/go-ldap/ldap/control.go new file mode 100644 index 000000000..4439a865d --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/control.go @@ -0,0 +1,497 @@ +package ldap + +import ( + "fmt" + "strconv" + + "gopkg.in/asn1-ber.v1" +) + +const ( + // ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt + ControlTypePaging = "1.2.840.113556.1.4.319" + // ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10 + ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1" + // ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 + ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4" + // ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 + ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5" + // ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296 + ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2" + + // ControlTypeMicrosoftNotification - https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx + ControlTypeMicrosoftNotification = "1.2.840.113556.1.4.528" + // ControlTypeMicrosoftShowDeleted - https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx + ControlTypeMicrosoftShowDeleted = "1.2.840.113556.1.4.417" +) + +// ControlTypeMap maps controls to text descriptions +var 
ControlTypeMap = map[string]string{ + ControlTypePaging: "Paging", + ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft", + ControlTypeManageDsaIT: "Manage DSA IT", + ControlTypeMicrosoftNotification: "Change Notification - Microsoft", + ControlTypeMicrosoftShowDeleted: "Show Deleted Objects - Microsoft", +} + +// Control defines an interface controls provide to encode and describe themselves +type Control interface { + // GetControlType returns the OID + GetControlType() string + // Encode returns the ber packet representation + Encode() *ber.Packet + // String returns a human-readable description + String() string +} + +// ControlString implements the Control interface for simple controls +type ControlString struct { + ControlType string + Criticality bool + ControlValue string +} + +// GetControlType returns the OID +func (c *ControlString) GetControlType() string { + return c.ControlType +} + +// Encode returns the ber packet representation +func (c *ControlString) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")")) + if c.Criticality { + packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality")) + } + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value")) + return packet +} + +// String returns a human-readable description +func (c *ControlString) String() string { + return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue) +} + +// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt +type ControlPaging struct { + // PagingSize indicates the page 
size + PagingSize uint32 + // Cookie is an opaque value returned by the server to track a paging cursor + Cookie []byte +} + +// GetControlType returns the OID +func (c *ControlPaging) GetControlType() string { + return ControlTypePaging +} + +// Encode returns the ber packet representation +func (c *ControlPaging) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")")) + + p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)") + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value") + seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size")) + cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie") + cookie.Value = c.Cookie + cookie.Data.Write(c.Cookie) + seq.AppendChild(cookie) + p2.AppendChild(seq) + + packet.AppendChild(p2) + return packet +} + +// String returns a human-readable description +func (c *ControlPaging) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q", + ControlTypeMap[ControlTypePaging], + ControlTypePaging, + false, + c.PagingSize, + c.Cookie) +} + +// SetCookie stores the given cookie in the paging control +func (c *ControlPaging) SetCookie(cookie []byte) { + c.Cookie = cookie +} + +// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10 +type ControlBeheraPasswordPolicy struct { + // Expire contains the number of seconds before a password will expire + Expire int64 + // Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password + 
Grace int64 + // Error indicates the error code + Error int8 + // ErrorString is a human readable error + ErrorString string +} + +// GetControlType returns the OID +func (c *ControlBeheraPasswordPolicy) GetControlType() string { + return ControlTypeBeheraPasswordPolicy +} + +// Encode returns the ber packet representation +func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlBeheraPasswordPolicy) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s", + ControlTypeMap[ControlTypeBeheraPasswordPolicy], + ControlTypeBeheraPasswordPolicy, + false, + c.Expire, + c.Grace, + c.Error, + c.ErrorString) +} + +// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 +type ControlVChuPasswordMustChange struct { + // MustChange indicates if the password is required to be changed + MustChange bool +} + +// GetControlType returns the OID +func (c *ControlVChuPasswordMustChange) GetControlType() string { + return ControlTypeVChuPasswordMustChange +} + +// Encode returns the ber packet representation +func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet { + return nil +} + +// String returns a human-readable description +func (c *ControlVChuPasswordMustChange) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t MustChange: %v", + ControlTypeMap[ControlTypeVChuPasswordMustChange], + ControlTypeVChuPasswordMustChange, + false, + c.MustChange) +} + +// ControlVChuPasswordWarning implements the control described in 
https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 +type ControlVChuPasswordWarning struct { + // Expire indicates the time in seconds until the password expires + Expire int64 +} + +// GetControlType returns the OID +func (c *ControlVChuPasswordWarning) GetControlType() string { + return ControlTypeVChuPasswordWarning +} + +// Encode returns the ber packet representation +func (c *ControlVChuPasswordWarning) Encode() *ber.Packet { + return nil +} + +// String returns a human-readable description +func (c *ControlVChuPasswordWarning) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t Expire: %b", + ControlTypeMap[ControlTypeVChuPasswordWarning], + ControlTypeVChuPasswordWarning, + false, + c.Expire) +} + +// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296 +type ControlManageDsaIT struct { + // Criticality indicates if this control is required + Criticality bool +} + +// GetControlType returns the OID +func (c *ControlManageDsaIT) GetControlType() string { + return ControlTypeManageDsaIT +} + +// Encode returns the ber packet representation +func (c *ControlManageDsaIT) Encode() *ber.Packet { + //FIXME + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")")) + if c.Criticality { + packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality")) + } + return packet +} + +// String returns a human-readable description +func (c *ControlManageDsaIT) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t", + ControlTypeMap[ControlTypeManageDsaIT], + ControlTypeManageDsaIT, + c.Criticality) +} + +// NewControlManageDsaIT returns a ControlManageDsaIT control +func NewControlManageDsaIT(Criticality 
bool) *ControlManageDsaIT { + return &ControlManageDsaIT{Criticality: Criticality} +} + +// ControlMicrosoftNotification implements the control described in https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx +type ControlMicrosoftNotification struct{} + +// GetControlType returns the OID +func (c *ControlMicrosoftNotification) GetControlType() string { + return ControlTypeMicrosoftNotification +} + +// Encode returns the ber packet representation +func (c *ControlMicrosoftNotification) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftNotification, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftNotification]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlMicrosoftNotification) String() string { + return fmt.Sprintf( + "Control Type: %s (%q)", + ControlTypeMap[ControlTypeMicrosoftNotification], + ControlTypeMicrosoftNotification) +} + +// NewControlMicrosoftNotification returns a ControlMicrosoftNotification control +func NewControlMicrosoftNotification() *ControlMicrosoftNotification { + return &ControlMicrosoftNotification{} +} + +// ControlMicrosoftShowDeleted implements the control described in https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx +type ControlMicrosoftShowDeleted struct{} + +// GetControlType returns the OID +func (c *ControlMicrosoftShowDeleted) GetControlType() string { + return ControlTypeMicrosoftShowDeleted +} + +// Encode returns the ber packet representation +func (c *ControlMicrosoftShowDeleted) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftShowDeleted, "Control Type 
("+ControlTypeMap[ControlTypeMicrosoftShowDeleted]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlMicrosoftShowDeleted) String() string { + return fmt.Sprintf( + "Control Type: %s (%q)", + ControlTypeMap[ControlTypeMicrosoftShowDeleted], + ControlTypeMicrosoftShowDeleted) +} + +// NewControlMicrosoftShowDeleted returns a ControlMicrosoftShowDeleted control +func NewControlMicrosoftShowDeleted() *ControlMicrosoftShowDeleted { + return &ControlMicrosoftShowDeleted{} +} + +// FindControl returns the first control of the given type in the list, or nil +func FindControl(controls []Control, controlType string) Control { + for _, c := range controls { + if c.GetControlType() == controlType { + return c + } + } + return nil +} + +// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made +func DecodeControl(packet *ber.Packet) (Control, error) { + var ( + ControlType = "" + Criticality = false + value *ber.Packet + ) + + switch len(packet.Children) { + case 0: + // at least one child is required for control type + return nil, fmt.Errorf("at least one child is required for control type") + + case 1: + // just type, no criticality or value + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + ControlType = packet.Children[0].Value.(string) + + case 2: + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + ControlType = packet.Children[0].Value.(string) + + // Children[1] could be criticality or value (both are optional) + // duck-type on whether this is a boolean + if _, ok := packet.Children[1].Value.(bool); ok { + packet.Children[1].Description = "Criticality" + Criticality = packet.Children[1].Value.(bool) + } else { + packet.Children[1].Description = "Control Value" + value = packet.Children[1] + } + + case 3: + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + 
ControlType = packet.Children[0].Value.(string) + + packet.Children[1].Description = "Criticality" + Criticality = packet.Children[1].Value.(bool) + + packet.Children[2].Description = "Control Value" + value = packet.Children[2] + + default: + // more than 3 children is invalid + return nil, fmt.Errorf("more than 3 children is invalid for controls") + } + + switch ControlType { + case ControlTypeManageDsaIT: + return NewControlManageDsaIT(Criticality), nil + case ControlTypePaging: + value.Description += " (Paging)" + c := new(ControlPaging) + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + value = value.Children[0] + value.Description = "Search Control Value" + value.Children[0].Description = "Paging Size" + value.Children[1].Description = "Cookie" + c.PagingSize = uint32(value.Children[0].Value.(int64)) + c.Cookie = value.Children[1].Data.Bytes() + value.Children[1].Value = c.Cookie + return c, nil + case ControlTypeBeheraPasswordPolicy: + value.Description += " (Password Policy - Behera)" + c := NewControlBeheraPasswordPolicy() + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + + sequence := value.Children[0] + + for _, child := range sequence.Children { + if child.Tag == 0 { + //Warning + warningPacket := child.Children[0] + packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int64) + if ok { + if warningPacket.Tag == 0 { + //timeBeforeExpiration + c.Expire = val + warningPacket.Value = c.Expire + } else if 
warningPacket.Tag == 1 { + //graceAuthNsRemaining + c.Grace = val + warningPacket.Value = c.Grace + } + } + } else if child.Tag == 1 { + // Error + packet, err := ber.DecodePacketErr(child.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int8) + if !ok { + // what to do? + val = -1 + } + c.Error = val + child.Value = c.Error + c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error] + } + } + return c, nil + case ControlTypeVChuPasswordMustChange: + c := &ControlVChuPasswordMustChange{MustChange: true} + return c, nil + case ControlTypeVChuPasswordWarning: + c := &ControlVChuPasswordWarning{Expire: -1} + expireStr := ber.DecodeString(value.Data.Bytes()) + + expire, err := strconv.ParseInt(expireStr, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse value as int: %s", err) + } + c.Expire = expire + value.Value = c.Expire + + return c, nil + case ControlTypeMicrosoftNotification: + return NewControlMicrosoftNotification(), nil + case ControlTypeMicrosoftShowDeleted: + return NewControlMicrosoftShowDeleted(), nil + default: + c := new(ControlString) + c.ControlType = ControlType + c.Criticality = Criticality + if value != nil { + c.ControlValue = value.Value.(string) + } + return c, nil + } +} + +// NewControlString returns a generic control +func NewControlString(controlType string, criticality bool, controlValue string) *ControlString { + return &ControlString{ + ControlType: controlType, + Criticality: criticality, + ControlValue: controlValue, + } +} + +// NewControlPaging returns a paging control +func NewControlPaging(pagingSize uint32) *ControlPaging { + return &ControlPaging{PagingSize: pagingSize} +} + +// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy +func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy { + return &ControlBeheraPasswordPolicy{ + Expire: -1, + Grace: -1, + Error: -1, + } +} + +func encodeControls(controls 
[]Control) *ber.Packet { + packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls") + for _, control := range controls { + packet.AppendChild(control.Encode()) + } + return packet +} diff --git a/vendor/github.com/go-ldap/ldap/debug.go b/vendor/github.com/go-ldap/ldap/debug.go new file mode 100644 index 000000000..7279fc251 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/debug.go @@ -0,0 +1,24 @@ +package ldap + +import ( + "log" + + "gopkg.in/asn1-ber.v1" +) + +// debugging type +// - has a Printf method to write the debug output +type debugging bool + +// write debug output +func (debug debugging) Printf(format string, args ...interface{}) { + if debug { + log.Printf(format, args...) + } +} + +func (debug debugging) PrintPacket(packet *ber.Packet) { + if debug { + ber.PrintPacket(packet) + } +} diff --git a/vendor/github.com/go-ldap/ldap/del.go b/vendor/github.com/go-ldap/ldap/del.go new file mode 100644 index 000000000..6f78beb18 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/del.go @@ -0,0 +1,84 @@ +// +// https://tools.ietf.org/html/rfc4511 +// +// DelRequest ::= [APPLICATION 10] LDAPDN + +package ldap + +import ( + "errors" + "log" + + "gopkg.in/asn1-ber.v1" +) + +// DelRequest implements an LDAP deletion request +type DelRequest struct { + // DN is the name of the directory entry to delete + DN string + // Controls hold optional controls to send with the request + Controls []Control +} + +func (d DelRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, d.DN, "Del Request") + request.Data.Write([]byte(d.DN)) + return request +} + +// NewDelRequest creates a delete request for the given DN and controls +func NewDelRequest(DN string, + Controls []Control) *DelRequest { + return &DelRequest{ + DN: DN, + Controls: Controls, + } +} + +// Del executes the given delete request +func (l *Conn) Del(delRequest *DelRequest) error { + packet := ber.Encode(ber.ClassUniversal, 
ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + packet.AppendChild(delRequest.encode()) + if len(delRequest.Controls) > 0 { + packet.AppendChild(encodeControls(delRequest.Controls)) + } + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationDelResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + + l.Debug.Printf("%d: returning", msgCtx.id) + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/dn.go b/vendor/github.com/go-ldap/ldap/dn.go new file mode 100644 index 000000000..f89e73a9a --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/dn.go @@ -0,0 +1,247 @@ +// File contains DN parsing functionality +// +// https://tools.ietf.org/html/rfc4514 +// +// distinguishedName = [ relativeDistinguishedName +// *( COMMA relativeDistinguishedName ) ] +// relativeDistinguishedName = attributeTypeAndValue +// *( PLUS attributeTypeAndValue ) +// attributeTypeAndValue = attributeType EQUALS attributeValue +// attributeType = descr / numericoid +// attributeValue = string / hexstring +// +// ; The following characters are to be escaped when they appear +// ; in the value to be encoded: ESC, one of , leading +// ; SHARP or SPACE, trailing SPACE, and NULL. 
+// string = [ ( leadchar / pair ) [ *( stringchar / pair ) +// ( trailchar / pair ) ] ] +// +// leadchar = LUTF1 / UTFMB +// LUTF1 = %x01-1F / %x21 / %x24-2A / %x2D-3A / +// %x3D / %x3F-5B / %x5D-7F +// +// trailchar = TUTF1 / UTFMB +// TUTF1 = %x01-1F / %x21 / %x23-2A / %x2D-3A / +// %x3D / %x3F-5B / %x5D-7F +// +// stringchar = SUTF1 / UTFMB +// SUTF1 = %x01-21 / %x23-2A / %x2D-3A / +// %x3D / %x3F-5B / %x5D-7F +// +// pair = ESC ( ESC / special / hexpair ) +// special = escaped / SPACE / SHARP / EQUALS +// escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE +// hexstring = SHARP 1*hexpair +// hexpair = HEX HEX +// +// where the productions , , , , +// , , , , , , , , +// , , and are defined in [RFC4512]. +// + +package ldap + +import ( + "bytes" + enchex "encoding/hex" + "errors" + "fmt" + "strings" + + "gopkg.in/asn1-ber.v1" +) + +// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514 +type AttributeTypeAndValue struct { + // Type is the attribute type + Type string + // Value is the attribute value + Value string +} + +// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514 +type RelativeDN struct { + Attributes []*AttributeTypeAndValue +} + +// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514 +type DN struct { + RDNs []*RelativeDN +} + +// ParseDN returns a distinguishedName or an error +func ParseDN(str string) (*DN, error) { + dn := new(DN) + dn.RDNs = make([]*RelativeDN, 0) + rdn := new(RelativeDN) + rdn.Attributes = make([]*AttributeTypeAndValue, 0) + buffer := bytes.Buffer{} + attribute := new(AttributeTypeAndValue) + escaping := false + + unescapedTrailingSpaces := 0 + stringFromBuffer := func() string { + s := buffer.String() + s = s[0 : len(s)-unescapedTrailingSpaces] + buffer.Reset() + unescapedTrailingSpaces = 0 + return s + } + + for i := 0; i < len(str); i++ { + char := str[i] + switch { + case escaping: + unescapedTrailingSpaces = 
0 + escaping = false + switch char { + case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\': + buffer.WriteByte(char) + continue + } + // Not a special character, assume hex encoded octet + if len(str) == i+1 { + return nil, errors.New("got corrupted escaped character") + } + + dst := []byte{0} + n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2])) + if err != nil { + return nil, fmt.Errorf("failed to decode escaped character: %s", err) + } else if n != 1 { + return nil, fmt.Errorf("expected 1 byte when un-escaping, got %d", n) + } + buffer.WriteByte(dst[0]) + i++ + case char == '\\': + unescapedTrailingSpaces = 0 + escaping = true + case char == '=': + attribute.Type = stringFromBuffer() + // Special case: If the first character in the value is # the + // following data is BER encoded so we can just fast forward + // and decode. + if len(str) > i+1 && str[i+1] == '#' { + i += 2 + index := strings.IndexAny(str[i:], ",+") + data := str + if index > 0 { + data = str[i : i+index] + } else { + data = str[i:] + } + rawBER, err := enchex.DecodeString(data) + if err != nil { + return nil, fmt.Errorf("failed to decode BER encoding: %s", err) + } + packet, err := ber.DecodePacketErr(rawBER) + if err != nil { + return nil, fmt.Errorf("failed to decode BER packet: %s", err) + } + buffer.WriteString(packet.Data.String()) + i += len(data) - 1 + } + case char == ',' || char == '+': + // We're done with this RDN or value, push it + if len(attribute.Type) == 0 { + return nil, errors.New("incomplete type, value pair") + } + attribute.Value = stringFromBuffer() + rdn.Attributes = append(rdn.Attributes, attribute) + attribute = new(AttributeTypeAndValue) + if char == ',' { + dn.RDNs = append(dn.RDNs, rdn) + rdn = new(RelativeDN) + rdn.Attributes = make([]*AttributeTypeAndValue, 0) + } + case char == ' ' && buffer.Len() == 0: + // ignore unescaped leading spaces + continue + default: + if char == ' ' { + // Track unescaped spaces in case they are trailing and we need to remove 
them + unescapedTrailingSpaces++ + } else { + // Reset if we see a non-space char + unescapedTrailingSpaces = 0 + } + buffer.WriteByte(char) + } + } + if buffer.Len() > 0 { + if len(attribute.Type) == 0 { + return nil, errors.New("DN ended with incomplete type, value pair") + } + attribute.Value = stringFromBuffer() + rdn.Attributes = append(rdn.Attributes, attribute) + dn.RDNs = append(dn.RDNs, rdn) + } + return dn, nil +} + +// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch). +// Returns true if they have the same number of relative distinguished names +// and corresponding relative distinguished names (by position) are the same. +func (d *DN) Equal(other *DN) bool { + if len(d.RDNs) != len(other.RDNs) { + return false + } + for i := range d.RDNs { + if !d.RDNs[i].Equal(other.RDNs[i]) { + return false + } + } + return true +} + +// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN. +// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com" +// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com" +// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com" +func (d *DN) AncestorOf(other *DN) bool { + if len(d.RDNs) >= len(other.RDNs) { + return false + } + // Take the last `len(d.RDNs)` RDNs from the other DN to compare against + otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):] + for i := range d.RDNs { + if !d.RDNs[i].Equal(otherRDNs[i]) { + return false + } + } + return true +} + +// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch). +// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues +// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type. +// The order of attributes is not significant. 
+// Case of attribute types is not significant. +func (r *RelativeDN) Equal(other *RelativeDN) bool { + if len(r.Attributes) != len(other.Attributes) { + return false + } + return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes) +} + +func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool { + for _, attr := range attrs { + found := false + for _, myattr := range r.Attributes { + if myattr.Equal(attr) { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue +// Case of the attribute type is not significant +func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool { + return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value +} diff --git a/vendor/github.com/go-ldap/ldap/doc.go b/vendor/github.com/go-ldap/ldap/doc.go new file mode 100644 index 000000000..f20d39bc9 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/doc.go @@ -0,0 +1,4 @@ +/* +Package ldap provides basic LDAP v3 functionality. 
+*/ +package ldap diff --git a/vendor/github.com/go-ldap/ldap/error.go b/vendor/github.com/go-ldap/ldap/error.go new file mode 100644 index 000000000..639ed8243 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/error.go @@ -0,0 +1,234 @@ +package ldap + +import ( + "fmt" + + "gopkg.in/asn1-ber.v1" +) + +// LDAP Result Codes +const ( + LDAPResultSuccess = 0 + LDAPResultOperationsError = 1 + LDAPResultProtocolError = 2 + LDAPResultTimeLimitExceeded = 3 + LDAPResultSizeLimitExceeded = 4 + LDAPResultCompareFalse = 5 + LDAPResultCompareTrue = 6 + LDAPResultAuthMethodNotSupported = 7 + LDAPResultStrongAuthRequired = 8 + LDAPResultReferral = 10 + LDAPResultAdminLimitExceeded = 11 + LDAPResultUnavailableCriticalExtension = 12 + LDAPResultConfidentialityRequired = 13 + LDAPResultSaslBindInProgress = 14 + LDAPResultNoSuchAttribute = 16 + LDAPResultUndefinedAttributeType = 17 + LDAPResultInappropriateMatching = 18 + LDAPResultConstraintViolation = 19 + LDAPResultAttributeOrValueExists = 20 + LDAPResultInvalidAttributeSyntax = 21 + LDAPResultNoSuchObject = 32 + LDAPResultAliasProblem = 33 + LDAPResultInvalidDNSyntax = 34 + LDAPResultIsLeaf = 35 + LDAPResultAliasDereferencingProblem = 36 + LDAPResultInappropriateAuthentication = 48 + LDAPResultInvalidCredentials = 49 + LDAPResultInsufficientAccessRights = 50 + LDAPResultBusy = 51 + LDAPResultUnavailable = 52 + LDAPResultUnwillingToPerform = 53 + LDAPResultLoopDetect = 54 + LDAPResultSortControlMissing = 60 + LDAPResultOffsetRangeError = 61 + LDAPResultNamingViolation = 64 + LDAPResultObjectClassViolation = 65 + LDAPResultNotAllowedOnNonLeaf = 66 + LDAPResultNotAllowedOnRDN = 67 + LDAPResultEntryAlreadyExists = 68 + LDAPResultObjectClassModsProhibited = 69 + LDAPResultResultsTooLarge = 70 + LDAPResultAffectsMultipleDSAs = 71 + LDAPResultVirtualListViewErrorOrControlError = 76 + LDAPResultOther = 80 + LDAPResultServerDown = 81 + LDAPResultLocalError = 82 + LDAPResultEncodingError = 83 + LDAPResultDecodingError = 84 + 
LDAPResultTimeout = 85 + LDAPResultAuthUnknown = 86 + LDAPResultFilterError = 87 + LDAPResultUserCanceled = 88 + LDAPResultParamError = 89 + LDAPResultNoMemory = 90 + LDAPResultConnectError = 91 + LDAPResultNotSupported = 92 + LDAPResultControlNotFound = 93 + LDAPResultNoResultsReturned = 94 + LDAPResultMoreResultsToReturn = 95 + LDAPResultClientLoop = 96 + LDAPResultReferralLimitExceeded = 97 + LDAPResultInvalidResponse = 100 + LDAPResultAmbiguousResponse = 101 + LDAPResultTLSNotSupported = 112 + LDAPResultIntermediateResponse = 113 + LDAPResultUnknownType = 114 + LDAPResultCanceled = 118 + LDAPResultNoSuchOperation = 119 + LDAPResultTooLate = 120 + LDAPResultCannotCancel = 121 + LDAPResultAssertionFailed = 122 + LDAPResultAuthorizationDenied = 123 + LDAPResultSyncRefreshRequired = 4096 + + ErrorNetwork = 200 + ErrorFilterCompile = 201 + ErrorFilterDecompile = 202 + ErrorDebugging = 203 + ErrorUnexpectedMessage = 204 + ErrorUnexpectedResponse = 205 + ErrorEmptyPassword = 206 +) + +// LDAPResultCodeMap contains string descriptions for LDAP error codes +var LDAPResultCodeMap = map[uint16]string{ + LDAPResultSuccess: "Success", + LDAPResultOperationsError: "Operations Error", + LDAPResultProtocolError: "Protocol Error", + LDAPResultTimeLimitExceeded: "Time Limit Exceeded", + LDAPResultSizeLimitExceeded: "Size Limit Exceeded", + LDAPResultCompareFalse: "Compare False", + LDAPResultCompareTrue: "Compare True", + LDAPResultAuthMethodNotSupported: "Auth Method Not Supported", + LDAPResultStrongAuthRequired: "Strong Auth Required", + LDAPResultReferral: "Referral", + LDAPResultAdminLimitExceeded: "Admin Limit Exceeded", + LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension", + LDAPResultConfidentialityRequired: "Confidentiality Required", + LDAPResultSaslBindInProgress: "Sasl Bind In Progress", + LDAPResultNoSuchAttribute: "No Such Attribute", + LDAPResultUndefinedAttributeType: "Undefined Attribute Type", + LDAPResultInappropriateMatching: 
"Inappropriate Matching", + LDAPResultConstraintViolation: "Constraint Violation", + LDAPResultAttributeOrValueExists: "Attribute Or Value Exists", + LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax", + LDAPResultNoSuchObject: "No Such Object", + LDAPResultAliasProblem: "Alias Problem", + LDAPResultInvalidDNSyntax: "Invalid DN Syntax", + LDAPResultIsLeaf: "Is Leaf", + LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem", + LDAPResultInappropriateAuthentication: "Inappropriate Authentication", + LDAPResultInvalidCredentials: "Invalid Credentials", + LDAPResultInsufficientAccessRights: "Insufficient Access Rights", + LDAPResultBusy: "Busy", + LDAPResultUnavailable: "Unavailable", + LDAPResultUnwillingToPerform: "Unwilling To Perform", + LDAPResultLoopDetect: "Loop Detect", + LDAPResultSortControlMissing: "Sort Control Missing", + LDAPResultOffsetRangeError: "Result Offset Range Error", + LDAPResultNamingViolation: "Naming Violation", + LDAPResultObjectClassViolation: "Object Class Violation", + LDAPResultResultsTooLarge: "Results Too Large", + LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf", + LDAPResultNotAllowedOnRDN: "Not Allowed On RDN", + LDAPResultEntryAlreadyExists: "Entry Already Exists", + LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited", + LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs", + LDAPResultVirtualListViewErrorOrControlError: "Failed because of a problem related to the virtual list view", + LDAPResultOther: "Other", + LDAPResultServerDown: "Cannot establish a connection", + LDAPResultLocalError: "An error occurred", + LDAPResultEncodingError: "LDAP encountered an error while encoding", + LDAPResultDecodingError: "LDAP encountered an error while decoding", + LDAPResultTimeout: "LDAP timeout while waiting for a response from the server", + LDAPResultAuthUnknown: "The auth method requested in a bind request is unknown", + LDAPResultFilterError: "An error occurred while encoding the given 
search filter", + LDAPResultUserCanceled: "The user canceled the operation", + LDAPResultParamError: "An invalid parameter was specified", + LDAPResultNoMemory: "Out of memory error", + LDAPResultConnectError: "A connection to the server could not be established", + LDAPResultNotSupported: "An attempt has been made to use a feature not supported LDAP", + LDAPResultControlNotFound: "The controls required to perform the requested operation were not found", + LDAPResultNoResultsReturned: "No results were returned from the server", + LDAPResultMoreResultsToReturn: "There are more results in the chain of results", + LDAPResultClientLoop: "A loop has been detected. For example when following referrals", + LDAPResultReferralLimitExceeded: "The referral hop limit has been exceeded", + LDAPResultCanceled: "Operation was canceled", + LDAPResultNoSuchOperation: "Server has no knowledge of the operation requested for cancellation", + LDAPResultTooLate: "Too late to cancel the outstanding operation", + LDAPResultCannotCancel: "The identified operation does not support cancellation or the cancel operation cannot be performed", + LDAPResultAssertionFailed: "An assertion control given in the LDAP operation evaluated to false causing the operation to not be performed", + LDAPResultSyncRefreshRequired: "Refresh Required", + LDAPResultInvalidResponse: "Invalid Response", + LDAPResultAmbiguousResponse: "Ambiguous Response", + LDAPResultTLSNotSupported: "Tls Not Supported", + LDAPResultIntermediateResponse: "Intermediate Response", + LDAPResultUnknownType: "Unknown Type", + LDAPResultAuthorizationDenied: "Authorization Denied", + + ErrorNetwork: "Network Error", + ErrorFilterCompile: "Filter Compile Error", + ErrorFilterDecompile: "Filter Decompile Error", + ErrorDebugging: "Debugging Error", + ErrorUnexpectedMessage: "Unexpected Message", + ErrorUnexpectedResponse: "Unexpected Response", + ErrorEmptyPassword: "Empty password not allowed by the client", +} + +// Error holds LDAP error 
information +type Error struct { + // Err is the underlying error + Err error + // ResultCode is the LDAP error code + ResultCode uint16 + // MatchedDN is the matchedDN returned if any + MatchedDN string +} + +func (e *Error) Error() string { + return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error()) +} + +// GetLDAPError creates an Error out of a BER packet representing a LDAPResult +// The return is an error object. It can be casted to a Error structure. +// This function returns nil if resultCode in the LDAPResult sequence is success(0). +func GetLDAPError(packet *ber.Packet) error { + if packet == nil { + return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty packet")} + } else if len(packet.Children) >= 2 { + response := packet.Children[1] + if response == nil { + return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty response in packet")} + } + if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 { + resultCode := uint16(response.Children[0].Value.(int64)) + if resultCode == 0 { // No error + return nil + } + return &Error{ResultCode: resultCode, MatchedDN: response.Children[1].Value.(string), + Err: fmt.Errorf("%s", response.Children[2].Value.(string))} + } + } + + return &Error{ResultCode: ErrorNetwork, Err: fmt.Errorf("Invalid packet format")} +} + +// NewError creates an LDAP error with the given code and underlying error +func NewError(resultCode uint16, err error) error { + return &Error{ResultCode: resultCode, Err: err} +} + +// IsErrorWithCode returns true if the given error is an LDAP error with the given result code +func IsErrorWithCode(err error, desiredResultCode uint16) bool { + if err == nil { + return false + } + + serverError, ok := err.(*Error) + if !ok { + return false + } + + return serverError.ResultCode == desiredResultCode +} diff --git a/vendor/github.com/go-ldap/ldap/filter.go 
b/vendor/github.com/go-ldap/ldap/filter.go new file mode 100644 index 000000000..4cc4207be --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/filter.go @@ -0,0 +1,465 @@ +package ldap + +import ( + "bytes" + hexpac "encoding/hex" + "errors" + "fmt" + "strings" + "unicode/utf8" + + "gopkg.in/asn1-ber.v1" +) + +// Filter choices +const ( + FilterAnd = 0 + FilterOr = 1 + FilterNot = 2 + FilterEqualityMatch = 3 + FilterSubstrings = 4 + FilterGreaterOrEqual = 5 + FilterLessOrEqual = 6 + FilterPresent = 7 + FilterApproxMatch = 8 + FilterExtensibleMatch = 9 +) + +// FilterMap contains human readable descriptions of Filter choices +var FilterMap = map[uint64]string{ + FilterAnd: "And", + FilterOr: "Or", + FilterNot: "Not", + FilterEqualityMatch: "Equality Match", + FilterSubstrings: "Substrings", + FilterGreaterOrEqual: "Greater Or Equal", + FilterLessOrEqual: "Less Or Equal", + FilterPresent: "Present", + FilterApproxMatch: "Approx Match", + FilterExtensibleMatch: "Extensible Match", +} + +// SubstringFilter options +const ( + FilterSubstringsInitial = 0 + FilterSubstringsAny = 1 + FilterSubstringsFinal = 2 +) + +// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices +var FilterSubstringsMap = map[uint64]string{ + FilterSubstringsInitial: "Substrings Initial", + FilterSubstringsAny: "Substrings Any", + FilterSubstringsFinal: "Substrings Final", +} + +// MatchingRuleAssertion choices +const ( + MatchingRuleAssertionMatchingRule = 1 + MatchingRuleAssertionType = 2 + MatchingRuleAssertionMatchValue = 3 + MatchingRuleAssertionDNAttributes = 4 +) + +// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices +var MatchingRuleAssertionMap = map[uint64]string{ + MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule", + MatchingRuleAssertionType: "Matching Rule Assertion Type", + MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value", + MatchingRuleAssertionDNAttributes: 
"Matching Rule Assertion DN Attributes", +} + +// CompileFilter converts a string representation of a filter into a BER-encoded packet +func CompileFilter(filter string) (*ber.Packet, error) { + if len(filter) == 0 || filter[0] != '(' { + return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('")) + } + packet, pos, err := compileFilter(filter, 1) + if err != nil { + return nil, err + } + switch { + case pos > len(filter): + return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + case pos < len(filter): + return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:]))) + } + return packet, nil +} + +// DecompileFilter converts a packet representation of a filter into a string representation +func DecompileFilter(packet *ber.Packet) (ret string, err error) { + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter")) + } + }() + ret = "(" + err = nil + childStr := "" + + switch packet.Tag { + case FilterAnd: + ret += "&" + for _, child := range packet.Children { + childStr, err = DecompileFilter(child) + if err != nil { + return + } + ret += childStr + } + case FilterOr: + ret += "|" + for _, child := range packet.Children { + childStr, err = DecompileFilter(child) + if err != nil { + return + } + ret += childStr + } + case FilterNot: + ret += "!" 
+ childStr, err = DecompileFilter(packet.Children[0]) + if err != nil { + return + } + ret += childStr + + case FilterSubstrings: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "=" + for i, child := range packet.Children[1].Children { + if i == 0 && child.Tag != FilterSubstringsInitial { + ret += "*" + } + ret += EscapeFilter(ber.DecodeString(child.Data.Bytes())) + if child.Tag != FilterSubstringsFinal { + ret += "*" + } + } + case FilterEqualityMatch: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterGreaterOrEqual: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += ">=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterLessOrEqual: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "<=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterPresent: + ret += ber.DecodeString(packet.Data.Bytes()) + ret += "=*" + case FilterApproxMatch: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "~=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterExtensibleMatch: + attr := "" + dnAttributes := false + matchingRule := "" + value := "" + + for _, child := range packet.Children { + switch child.Tag { + case MatchingRuleAssertionMatchingRule: + matchingRule = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionType: + attr = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionMatchValue: + value = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionDNAttributes: + dnAttributes = child.Value.(bool) + } + } + + if len(attr) > 0 { + ret += attr + } + if dnAttributes { + ret += ":dn" + } + if len(matchingRule) > 0 { + ret += ":" + ret += matchingRule + } + ret += ":=" + ret += EscapeFilter(value) + } + + ret += ")" + return +} + +func compileFilterSet(filter 
string, pos int, parent *ber.Packet) (int, error) { + for pos < len(filter) && filter[pos] == '(' { + child, newPos, err := compileFilter(filter, pos+1) + if err != nil { + return pos, err + } + pos = newPos + parent.AppendChild(child) + } + if pos == len(filter) { + return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + } + + return pos + 1, nil +} + +func compileFilter(filter string, pos int) (*ber.Packet, int, error) { + var ( + packet *ber.Packet + err error + ) + + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter")) + } + }() + newPos := pos + + currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:]) + + switch currentRune { + case utf8.RuneError: + return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos)) + case '(': + packet, newPos, err = compileFilter(filter, pos+currentWidth) + newPos++ + return packet, newPos, err + case '&': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd]) + newPos, err = compileFilterSet(filter, pos+currentWidth, packet) + return packet, newPos, err + case '|': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr]) + newPos, err = compileFilterSet(filter, pos+currentWidth, packet) + return packet, newPos, err + case '!': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot]) + var child *ber.Packet + child, newPos, err = compileFilter(filter, pos+currentWidth) + packet.AppendChild(child) + return packet, newPos, err + default: + const ( + stateReadingAttr = 0 + stateReadingExtensibleMatchingRule = 1 + stateReadingCondition = 2 + ) + + state := stateReadingAttr + + attribute := "" + extensibleDNAttributes := false + extensibleMatchingRule := "" + condition := "" + + for newPos < len(filter) { + remainingFilter := filter[newPos:] + 
currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter) + if currentRune == ')' { + break + } + if currentRune == utf8.RuneError { + return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos)) + } + + switch state { + case stateReadingAttr: + switch { + // Extensible rule, with only DN-matching + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + extensibleDNAttributes = true + state = stateReadingCondition + newPos += 5 + + // Extensible rule, with DN-matching and a matching OID + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + extensibleDNAttributes = true + state = stateReadingExtensibleMatchingRule + newPos += 4 + + // Extensible rule, with attr only + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + state = stateReadingCondition + newPos += 2 + + // Extensible rule, with no DN attribute matching + case currentRune == ':': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + state = stateReadingExtensibleMatchingRule + newPos++ + + // Equality condition + case currentRune == '=': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch]) + state = stateReadingCondition + newPos++ + + // Greater-than or equal + case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual]) + state = stateReadingCondition + 
newPos += 2 + + // Less-than or equal + case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual]) + state = stateReadingCondition + newPos += 2 + + // Approx + case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch]) + state = stateReadingCondition + newPos += 2 + + // Still reading the attribute name + default: + attribute += fmt.Sprintf("%c", currentRune) + newPos += currentWidth + } + + case stateReadingExtensibleMatchingRule: + switch { + + // Matching rule OID is done + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="): + state = stateReadingCondition + newPos += 2 + + // Still reading the matching rule oid + default: + extensibleMatchingRule += fmt.Sprintf("%c", currentRune) + newPos += currentWidth + } + + case stateReadingCondition: + // append to the condition + condition += fmt.Sprintf("%c", currentRune) + newPos += currentWidth + } + } + + if newPos == len(filter) { + err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + return packet, newPos, err + } + if packet == nil { + err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter")) + return packet, newPos, err + } + + switch { + case packet.Tag == FilterExtensibleMatch: + // MatchingRuleAssertion ::= SEQUENCE { + // matchingRule [1] MatchingRuleID OPTIONAL, + // type [2] AttributeDescription OPTIONAL, + // matchValue [3] AssertionValue, + // dnAttributes [4] BOOLEAN DEFAULT FALSE + // } + + // Include the matching rule oid, if specified + if len(extensibleMatchingRule) > 0 { + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule, MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule])) + } + + // Include the 
attribute, if specified + if len(attribute) > 0 { + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute, MatchingRuleAssertionMap[MatchingRuleAssertionType])) + } + + // Add the value (only required child) + encodedString, encodeErr := escapedStringToEncodedBytes(condition) + if encodeErr != nil { + return packet, newPos, encodeErr + } + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue])) + + // Defaults to false, so only include in the sequence if true + if extensibleDNAttributes { + packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes])) + } + + case packet.Tag == FilterEqualityMatch && condition == "*": + packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute, FilterMap[FilterPresent]) + case packet.Tag == FilterEqualityMatch && strings.Contains(condition, "*"): + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) + packet.Tag = FilterSubstrings + packet.Description = FilterMap[uint64(packet.Tag)] + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings") + parts := strings.Split(condition, "*") + for i, part := range parts { + if part == "" { + continue + } + var tag ber.Tag + switch i { + case 0: + tag = FilterSubstringsInitial + case len(parts) - 1: + tag = FilterSubstringsFinal + default: + tag = FilterSubstringsAny + } + encodedString, encodeErr := escapedStringToEncodedBytes(part) + if encodeErr != nil { + return packet, newPos, encodeErr + } + seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)])) + } + packet.AppendChild(seq) + default: + 
encodedString, encodeErr := escapedStringToEncodedBytes(condition) + if encodeErr != nil { + return packet, newPos, encodeErr + } + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition")) + } + + newPos += currentWidth + return packet, newPos, err + } +} + +// Convert from "ABC\xx\xx\xx" form to literal bytes for transport +func escapedStringToEncodedBytes(escapedString string) (string, error) { + var buffer bytes.Buffer + i := 0 + for i < len(escapedString) { + currentRune, currentWidth := utf8.DecodeRuneInString(escapedString[i:]) + if currentRune == utf8.RuneError { + return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", i)) + } + + // Check for escaped hex characters and convert them to their literal value for transport. + if currentRune == '\\' { + // http://tools.ietf.org/search/rfc4515 + // \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not + // being a member of UTF1SUBSET. + if i+2 > len(escapedString) { + return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter")) + } + escByte, decodeErr := hexpac.DecodeString(escapedString[i+1 : i+3]) + if decodeErr != nil { + return "", NewError(ErrorFilterCompile, errors.New("ldap: invalid characters for escape in filter")) + } + buffer.WriteByte(escByte[0]) + i += 2 // +1 from end of loop, so 3 total for \xx. 
+ } else { + buffer.WriteRune(currentRune) + } + + i += currentWidth + } + return buffer.String(), nil +} diff --git a/vendor/github.com/go-ldap/ldap/ldap.go b/vendor/github.com/go-ldap/ldap/ldap.go new file mode 100644 index 000000000..d7666676f --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/ldap.go @@ -0,0 +1,338 @@ +package ldap + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + + "gopkg.in/asn1-ber.v1" +) + +// LDAP Application Codes +const ( + ApplicationBindRequest = 0 + ApplicationBindResponse = 1 + ApplicationUnbindRequest = 2 + ApplicationSearchRequest = 3 + ApplicationSearchResultEntry = 4 + ApplicationSearchResultDone = 5 + ApplicationModifyRequest = 6 + ApplicationModifyResponse = 7 + ApplicationAddRequest = 8 + ApplicationAddResponse = 9 + ApplicationDelRequest = 10 + ApplicationDelResponse = 11 + ApplicationModifyDNRequest = 12 + ApplicationModifyDNResponse = 13 + ApplicationCompareRequest = 14 + ApplicationCompareResponse = 15 + ApplicationAbandonRequest = 16 + ApplicationSearchResultReference = 19 + ApplicationExtendedRequest = 23 + ApplicationExtendedResponse = 24 +) + +// ApplicationMap contains human readable descriptions of LDAP Application Codes +var ApplicationMap = map[uint8]string{ + ApplicationBindRequest: "Bind Request", + ApplicationBindResponse: "Bind Response", + ApplicationUnbindRequest: "Unbind Request", + ApplicationSearchRequest: "Search Request", + ApplicationSearchResultEntry: "Search Result Entry", + ApplicationSearchResultDone: "Search Result Done", + ApplicationModifyRequest: "Modify Request", + ApplicationModifyResponse: "Modify Response", + ApplicationAddRequest: "Add Request", + ApplicationAddResponse: "Add Response", + ApplicationDelRequest: "Del Request", + ApplicationDelResponse: "Del Response", + ApplicationModifyDNRequest: "Modify DN Request", + ApplicationModifyDNResponse: "Modify DN Response", + ApplicationCompareRequest: "Compare Request", + ApplicationCompareResponse: "Compare Response", + 
ApplicationAbandonRequest: "Abandon Request", + ApplicationSearchResultReference: "Search Result Reference", + ApplicationExtendedRequest: "Extended Request", + ApplicationExtendedResponse: "Extended Response", +} + +// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10) +const ( + BeheraPasswordExpired = 0 + BeheraAccountLocked = 1 + BeheraChangeAfterReset = 2 + BeheraPasswordModNotAllowed = 3 + BeheraMustSupplyOldPassword = 4 + BeheraInsufficientPasswordQuality = 5 + BeheraPasswordTooShort = 6 + BeheraPasswordTooYoung = 7 + BeheraPasswordInHistory = 8 +) + +// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes +var BeheraPasswordPolicyErrorMap = map[int8]string{ + BeheraPasswordExpired: "Password expired", + BeheraAccountLocked: "Account locked", + BeheraChangeAfterReset: "Password must be changed", + BeheraPasswordModNotAllowed: "Policy prevents password modification", + BeheraMustSupplyOldPassword: "Policy requires old password in order to change password", + BeheraInsufficientPasswordQuality: "Password fails quality checks", + BeheraPasswordTooShort: "Password is too short for policy", + BeheraPasswordTooYoung: "Password has been changed too recently", + BeheraPasswordInHistory: "New password is in list of old passwords", +} + +// Adds descriptions to an LDAP Response packet for debugging +func addLDAPDescriptions(packet *ber.Packet) (err error) { + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorDebugging, errors.New("ldap: cannot process packet to add descriptions")) + } + }() + packet.Description = "LDAP Response" + packet.Children[0].Description = "Message ID" + + application := uint8(packet.Children[1].Tag) + packet.Children[1].Description = ApplicationMap[application] + + switch application { + case ApplicationBindRequest: + err = addRequestDescriptions(packet) + case ApplicationBindResponse: + err = 
addDefaultLDAPResponseDescriptions(packet) + case ApplicationUnbindRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchResultEntry: + packet.Children[1].Children[0].Description = "Object Name" + packet.Children[1].Children[1].Description = "Attributes" + for _, child := range packet.Children[1].Children[1].Children { + child.Description = "Attribute" + child.Children[0].Description = "Attribute Name" + child.Children[1].Description = "Attribute Values" + for _, grandchild := range child.Children[1].Children { + grandchild.Description = "Attribute Value" + } + } + if len(packet.Children) == 3 { + err = addControlDescriptions(packet.Children[2]) + } + case ApplicationSearchResultDone: + err = addDefaultLDAPResponseDescriptions(packet) + case ApplicationModifyRequest: + err = addRequestDescriptions(packet) + case ApplicationModifyResponse: + case ApplicationAddRequest: + err = addRequestDescriptions(packet) + case ApplicationAddResponse: + case ApplicationDelRequest: + err = addRequestDescriptions(packet) + case ApplicationDelResponse: + case ApplicationModifyDNRequest: + err = addRequestDescriptions(packet) + case ApplicationModifyDNResponse: + case ApplicationCompareRequest: + err = addRequestDescriptions(packet) + case ApplicationCompareResponse: + case ApplicationAbandonRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchResultReference: + case ApplicationExtendedRequest: + err = addRequestDescriptions(packet) + case ApplicationExtendedResponse: + } + + return err +} + +func addControlDescriptions(packet *ber.Packet) error { + packet.Description = "Controls" + for _, child := range packet.Children { + var value *ber.Packet + controlType := "" + child.Description = "Control" + switch len(child.Children) { + case 0: + // at least one child is required for control type + return fmt.Errorf("at least one child is required for control type") + + case 1: + // 
just type, no criticality or value + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + + case 2: + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + // Children[1] could be criticality or value (both are optional) + // duck-type on whether this is a boolean + if _, ok := child.Children[1].Value.(bool); ok { + child.Children[1].Description = "Criticality" + } else { + child.Children[1].Description = "Control Value" + value = child.Children[1] + } + + case 3: + // criticality and value present + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + child.Children[1].Description = "Criticality" + child.Children[2].Description = "Control Value" + value = child.Children[2] + + default: + // more than 3 children is invalid + return fmt.Errorf("more than 3 children for control packet found") + } + + if value == nil { + continue + } + switch controlType { + case ControlTypePaging: + value.Description += " (Paging)" + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes() + value.AppendChild(valueChildren) + } + value.Children[0].Description = "Real Search Control Value" + value.Children[0].Children[0].Description = "Paging Size" + value.Children[0].Children[1].Description = "Cookie" + + case ControlTypeBeheraPasswordPolicy: + value.Description += " (Password Policy - Behera Draft)" + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = 
nil + value.AppendChild(valueChildren) + } + sequence := value.Children[0] + for _, child := range sequence.Children { + if child.Tag == 0 { + //Warning + warningPacket := child.Children[0] + packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int64) + if ok { + if warningPacket.Tag == 0 { + //timeBeforeExpiration + value.Description += " (TimeBeforeExpiration)" + warningPacket.Value = val + } else if warningPacket.Tag == 1 { + //graceAuthNsRemaining + value.Description += " (GraceAuthNsRemaining)" + warningPacket.Value = val + } + } + } else if child.Tag == 1 { + // Error + packet, err := ber.DecodePacketErr(child.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int8) + if !ok { + val = -1 + } + child.Description = "Error" + child.Value = val + } + } + } + } + return nil +} + +func addRequestDescriptions(packet *ber.Packet) error { + packet.Description = "LDAP Request" + packet.Children[0].Description = "Message ID" + packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)] + if len(packet.Children) == 3 { + return addControlDescriptions(packet.Children[2]) + } + return nil +} + +func addDefaultLDAPResponseDescriptions(packet *ber.Packet) error { + err := GetLDAPError(packet) + packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[err.(*Error).ResultCode] + ")" + packet.Children[1].Children[1].Description = "Matched DN (" + err.(*Error).MatchedDN + ")" + packet.Children[1].Children[2].Description = "Error Message" + if len(packet.Children[1].Children) > 3 { + packet.Children[1].Children[3].Description = "Referral" + } + if len(packet.Children) == 3 { + return addControlDescriptions(packet.Children[2]) + } + return nil +} + +// DebugBinaryFile reads and prints packets from the given filename +func DebugBinaryFile(fileName 
string) error { + file, err := ioutil.ReadFile(fileName) + if err != nil { + return NewError(ErrorDebugging, err) + } + ber.PrintBytes(os.Stdout, file, "") + packet, err := ber.DecodePacketErr(file) + if err != nil { + return fmt.Errorf("failed to decode packet: %s", err) + } + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + + return nil +} + +var hex = "0123456789abcdef" + +func mustEscape(c byte) bool { + return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0 +} + +// EscapeFilter escapes from the provided LDAP filter string the special +// characters in the set `()*\` and those out of the range 0 < c < 0x80, +// as defined in RFC4515. +func EscapeFilter(filter string) string { + escape := 0 + for i := 0; i < len(filter); i++ { + if mustEscape(filter[i]) { + escape++ + } + } + if escape == 0 { + return filter + } + buf := make([]byte, len(filter)+escape*2) + for i, j := 0, 0; i < len(filter); i++ { + c := filter[i] + if mustEscape(c) { + buf[j+0] = '\\' + buf[j+1] = hex[c>>4] + buf[j+2] = hex[c&0xf] + j += 3 + } else { + buf[j] = c + j++ + } + } + return string(buf) +} diff --git a/vendor/github.com/go-ldap/ldap/moddn.go b/vendor/github.com/go-ldap/ldap/moddn.go new file mode 100644 index 000000000..803279d26 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/moddn.go @@ -0,0 +1,104 @@ +// Package ldap - moddn.go contains ModifyDN functionality +// +// https://tools.ietf.org/html/rfc4511 +// ModifyDNRequest ::= [APPLICATION 12] SEQUENCE { +// entry LDAPDN, +// newrdn RelativeLDAPDN, +// deleteoldrdn BOOLEAN, +// newSuperior [0] LDAPDN OPTIONAL } +// +// +package ldap + +import ( + "errors" + "log" + + "gopkg.in/asn1-ber.v1" +) + +// ModifyDNRequest holds the request to modify a DN +type ModifyDNRequest struct { + DN string + NewRDN string + DeleteOldRDN bool + NewSuperior string +} + +// NewModifyDNRequest creates a new request which can be passed to ModifyDN(). 
+// +// To move an object in the tree, set the "newSup" to the new parent entry DN. Use an +// empty string for just changing the object's RDN. +// +// For moving the object without renaming, the "rdn" must be the first +// RDN of the given DN. +// +// A call like +// mdnReq := NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "") +// will setup the request to just rename uid=someone,dc=example,dc=org to +// uid=newname,dc=example,dc=org. +func NewModifyDNRequest(dn string, rdn string, delOld bool, newSup string) *ModifyDNRequest { + return &ModifyDNRequest{ + DN: dn, + NewRDN: rdn, + DeleteOldRDN: delOld, + NewSuperior: newSup, + } +} + +func (m ModifyDNRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyDNRequest, nil, "Modify DN Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN")) + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.NewRDN, "New RDN")) + request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, m.DeleteOldRDN, "Delete old RDN")) + if m.NewSuperior != "" { + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, m.NewSuperior, "New Superior")) + } + return request +} + +// ModifyDN renames the given DN and optionally move to another base (when the "newSup" argument +// to NewModifyDNRequest() is not ""). 
+func (l *Conn) ModifyDN(m *ModifyDNRequest) error { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + packet.AppendChild(m.encode()) + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationModifyDNResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + + l.Debug.Printf("%d: returning", msgCtx.id) + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/modify.go b/vendor/github.com/go-ldap/ldap/modify.go new file mode 100644 index 000000000..d83e6221f --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/modify.go @@ -0,0 +1,173 @@ +// File contains Modify functionality +// +// https://tools.ietf.org/html/rfc4511 +// +// ModifyRequest ::= [APPLICATION 6] SEQUENCE { +// object LDAPDN, +// changes SEQUENCE OF change SEQUENCE { +// operation ENUMERATED { +// add (0), +// delete (1), +// replace (2), +// ... 
}, +// modification PartialAttribute } } +// +// PartialAttribute ::= SEQUENCE { +// type AttributeDescription, +// vals SET OF value AttributeValue } +// +// AttributeDescription ::= LDAPString +// -- Constrained to +// -- [RFC4512] +// +// AttributeValue ::= OCTET STRING +// + +package ldap + +import ( + "errors" + "log" + + "gopkg.in/asn1-ber.v1" +) + +// Change operation choices +const ( + AddAttribute = 0 + DeleteAttribute = 1 + ReplaceAttribute = 2 +) + +// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type PartialAttribute struct { + // Type is the type of the partial attribute + Type string + // Vals are the values of the partial attribute + Vals []string +} + +func (p *PartialAttribute) encode() *ber.Packet { + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute") + seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type")) + set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue") + for _, value := range p.Vals { + set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals")) + } + seq.AppendChild(set) + return seq +} + +// Change for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type Change struct { + // Operation is the type of change to be made + Operation uint + // Modification is the attribute to be modified + Modification PartialAttribute +} + +func (c *Change) encode() *ber.Packet { + change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change") + change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(c.Operation), "Operation")) + change.AppendChild(c.Modification.encode()) + return change +} + +// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type ModifyRequest struct { + // DN is the distinguishedName of the directory 
entry to modify + DN string + // Changes contain the attributes to modify + Changes []Change + // Controls hold optional controls to send with the request + Controls []Control +} + +// Add appends the given attribute to the list of changes to be made +func (m *ModifyRequest) Add(attrType string, attrVals []string) { + m.appendChange(AddAttribute, attrType, attrVals) +} + +// Delete appends the given attribute to the list of changes to be made +func (m *ModifyRequest) Delete(attrType string, attrVals []string) { + m.appendChange(DeleteAttribute, attrType, attrVals) +} + +// Replace appends the given attribute to the list of changes to be made +func (m *ModifyRequest) Replace(attrType string, attrVals []string) { + m.appendChange(ReplaceAttribute, attrType, attrVals) +} + +func (m *ModifyRequest) appendChange(operation uint, attrType string, attrVals []string) { + m.Changes = append(m.Changes, Change{operation, PartialAttribute{Type: attrType, Vals: attrVals}}) +} + +func (m ModifyRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN")) + changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes") + for _, change := range m.Changes { + changes.AppendChild(change.encode()) + } + request.AppendChild(changes) + return request +} + +// NewModifyRequest creates a modify request for the given DN +func NewModifyRequest( + dn string, + controls []Control, +) *ModifyRequest { + return &ModifyRequest{ + DN: dn, + Controls: controls, + } +} + +// Modify performs the ModifyRequest +func (l *Conn) Modify(modifyRequest *ModifyRequest) error { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), 
"MessageID")) + packet.AppendChild(modifyRequest.encode()) + if len(modifyRequest.Controls) > 0 { + packet.AppendChild(encodeControls(modifyRequest.Controls)) + } + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationModifyResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + + l.Debug.Printf("%d: returning", msgCtx.id) + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/passwdmodify.go b/vendor/github.com/go-ldap/ldap/passwdmodify.go new file mode 100644 index 000000000..06bc21db1 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/passwdmodify.go @@ -0,0 +1,157 @@ +// This file contains the password modify extended operation as specified in rfc 3062 +// +// https://tools.ietf.org/html/rfc3062 +// + +package ldap + +import ( + "errors" + "fmt" + + "gopkg.in/asn1-ber.v1" +) + +const ( + passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1" +) + +// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt +type PasswordModifyRequest struct { + // UserIdentity is an optional string representation of the user associated with the request. + // This string may or may not be an LDAPDN [RFC2253]. 
+ // If no UserIdentity field is present, the request acts up upon the password of the user currently associated with the LDAP session + UserIdentity string + // OldPassword, if present, contains the user's current password + OldPassword string + // NewPassword, if present, contains the desired password for this user + NewPassword string +} + +// PasswordModifyResult holds the server response to a PasswordModifyRequest +type PasswordModifyResult struct { + // GeneratedPassword holds a password generated by the server, if present + GeneratedPassword string + // Referral are the returned referral + Referral string +} + +func (r *PasswordModifyRequest) encode() (*ber.Packet, error) { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation") + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID")) + extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request") + passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request") + if r.UserIdentity != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, r.UserIdentity, "User Identity")) + } + if r.OldPassword != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, r.OldPassword, "Old Password")) + } + if r.NewPassword != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, r.NewPassword, "New Password")) + } + + extendedRequestValue.AppendChild(passwordModifyRequestValue) + request.AppendChild(extendedRequestValue) + + return request, nil +} + +// NewPasswordModifyRequest creates a new PasswordModifyRequest +// +// According to the RFC 3602: +// userIdentity is a string representing the 
user associated with the request. +// This string may or may not be an LDAPDN (RFC 2253). +// If userIdentity is empty then the operation will act on the user associated +// with the session. +// +// oldPassword is the current user's password, it can be empty or it can be +// needed depending on the session user access rights (usually an administrator +// can change a user's password without knowing the current one) and the +// password policy (see pwdSafeModify password policy's attribute) +// +// newPassword is the desired user's password. If empty the server can return +// an error or generate a new password that will be available in the +// PasswordModifyResult.GeneratedPassword +// +func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest { + return &PasswordModifyRequest{ + UserIdentity: userIdentity, + OldPassword: oldPassword, + NewPassword: newPassword, + } +} + +// PasswordModify performs the modification request +func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + + encodedPasswordModifyRequest, err := passwordModifyRequest.encode() + if err != nil { + return nil, err + } + packet.AppendChild(encodedPasswordModifyRequest) + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + result := &PasswordModifyResult{} + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + 
return nil, err + } + + if packet == nil { + return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message")) + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationExtendedResponse { + err := GetLDAPError(packet) + if err != nil { + if IsErrorWithCode(err, LDAPResultReferral) { + for _, child := range packet.Children[1].Children { + if child.Tag == 3 { + result.Referral = child.Children[0].Value.(string) + } + } + } + return result, err + } + } else { + return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag)) + } + + extendedResponse := packet.Children[1] + for _, child := range extendedResponse.Children { + if child.Tag == 11 { + passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes()) + if len(passwordModifyResponseValue.Children) == 1 { + if passwordModifyResponseValue.Children[0].Tag == 0 { + result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes()) + } + } + } + } + + return result, nil +} diff --git a/vendor/github.com/go-ldap/ldap/search.go b/vendor/github.com/go-ldap/ldap/search.go new file mode 100644 index 000000000..3aa6dac0c --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/search.go @@ -0,0 +1,450 @@ +// File contains Search functionality +// +// https://tools.ietf.org/html/rfc4511 +// +// SearchRequest ::= [APPLICATION 3] SEQUENCE { +// baseObject LDAPDN, +// scope ENUMERATED { +// baseObject (0), +// singleLevel (1), +// wholeSubtree (2), +// ... }, +// derefAliases ENUMERATED { +// neverDerefAliases (0), +// derefInSearching (1), +// derefFindingBaseObj (2), +// derefAlways (3) }, +// sizeLimit INTEGER (0 .. maxInt), +// timeLimit INTEGER (0 .. 
maxInt), +// typesOnly BOOLEAN, +// filter Filter, +// attributes AttributeSelection } +// +// AttributeSelection ::= SEQUENCE OF selector LDAPString +// -- The LDAPString is constrained to +// -- in Section 4.5.1.8 +// +// Filter ::= CHOICE { +// and [0] SET SIZE (1..MAX) OF filter Filter, +// or [1] SET SIZE (1..MAX) OF filter Filter, +// not [2] Filter, +// equalityMatch [3] AttributeValueAssertion, +// substrings [4] SubstringFilter, +// greaterOrEqual [5] AttributeValueAssertion, +// lessOrEqual [6] AttributeValueAssertion, +// present [7] AttributeDescription, +// approxMatch [8] AttributeValueAssertion, +// extensibleMatch [9] MatchingRuleAssertion, +// ... } +// +// SubstringFilter ::= SEQUENCE { +// type AttributeDescription, +// substrings SEQUENCE SIZE (1..MAX) OF substring CHOICE { +// initial [0] AssertionValue, -- can occur at most once +// any [1] AssertionValue, +// final [2] AssertionValue } -- can occur at most once +// } +// +// MatchingRuleAssertion ::= SEQUENCE { +// matchingRule [1] MatchingRuleId OPTIONAL, +// type [2] AttributeDescription OPTIONAL, +// matchValue [3] AssertionValue, +// dnAttributes [4] BOOLEAN DEFAULT FALSE } +// +// + +package ldap + +import ( + "errors" + "fmt" + "sort" + "strings" + + "gopkg.in/asn1-ber.v1" +) + +// scope choices +const ( + ScopeBaseObject = 0 + ScopeSingleLevel = 1 + ScopeWholeSubtree = 2 +) + +// ScopeMap contains human readable descriptions of scope choices +var ScopeMap = map[int]string{ + ScopeBaseObject: "Base Object", + ScopeSingleLevel: "Single Level", + ScopeWholeSubtree: "Whole Subtree", +} + +// derefAliases +const ( + NeverDerefAliases = 0 + DerefInSearching = 1 + DerefFindingBaseObj = 2 + DerefAlways = 3 +) + +// DerefMap contains human readable descriptions of derefAliases choices +var DerefMap = map[int]string{ + NeverDerefAliases: "NeverDerefAliases", + DerefInSearching: "DerefInSearching", + DerefFindingBaseObj: "DerefFindingBaseObj", + DerefAlways: "DerefAlways", +} + +// NewEntry 
returns an Entry object with the specified distinguished name and attribute key-value pairs. +// The map of attributes is accessed in alphabetical order of the keys in order to ensure that, for the +// same input map of attributes, the output entry will contain the same order of attributes +func NewEntry(dn string, attributes map[string][]string) *Entry { + var attributeNames []string + for attributeName := range attributes { + attributeNames = append(attributeNames, attributeName) + } + sort.Strings(attributeNames) + + var encodedAttributes []*EntryAttribute + for _, attributeName := range attributeNames { + encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName])) + } + return &Entry{ + DN: dn, + Attributes: encodedAttributes, + } +} + +// Entry represents a single search result entry +type Entry struct { + // DN is the distinguished name of the entry + DN string + // Attributes are the returned attributes for the entry + Attributes []*EntryAttribute +} + +// GetAttributeValues returns the values for the named attribute, or an empty list +func (e *Entry) GetAttributeValues(attribute string) []string { + for _, attr := range e.Attributes { + if attr.Name == attribute { + return attr.Values + } + } + return []string{} +} + +// GetRawAttributeValues returns the byte values for the named attribute, or an empty list +func (e *Entry) GetRawAttributeValues(attribute string) [][]byte { + for _, attr := range e.Attributes { + if attr.Name == attribute { + return attr.ByteValues + } + } + return [][]byte{} +} + +// GetAttributeValue returns the first value for the named attribute, or "" +func (e *Entry) GetAttributeValue(attribute string) string { + values := e.GetAttributeValues(attribute) + if len(values) == 0 { + return "" + } + return values[0] +} + +// GetRawAttributeValue returns the first value for the named attribute, or an empty slice +func (e *Entry) GetRawAttributeValue(attribute string) []byte { + values := 
e.GetRawAttributeValues(attribute) + if len(values) == 0 { + return []byte{} + } + return values[0] +} + +// Print outputs a human-readable description +func (e *Entry) Print() { + fmt.Printf("DN: %s\n", e.DN) + for _, attr := range e.Attributes { + attr.Print() + } +} + +// PrettyPrint outputs a human-readable description indenting +func (e *Entry) PrettyPrint(indent int) { + fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN) + for _, attr := range e.Attributes { + attr.PrettyPrint(indent + 2) + } +} + +// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair +func NewEntryAttribute(name string, values []string) *EntryAttribute { + var bytes [][]byte + for _, value := range values { + bytes = append(bytes, []byte(value)) + } + return &EntryAttribute{ + Name: name, + Values: values, + ByteValues: bytes, + } +} + +// EntryAttribute holds a single attribute +type EntryAttribute struct { + // Name is the name of the attribute + Name string + // Values contain the string values of the attribute + Values []string + // ByteValues contain the raw values of the attribute + ByteValues [][]byte +} + +// Print outputs a human-readable description +func (e *EntryAttribute) Print() { + fmt.Printf("%s: %s\n", e.Name, e.Values) +} + +// PrettyPrint outputs a human-readable description with indenting +func (e *EntryAttribute) PrettyPrint(indent int) { + fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values) +} + +// SearchResult holds the server's response to a search request +type SearchResult struct { + // Entries are the returned entries + Entries []*Entry + // Referrals are the returned referrals + Referrals []string + // Controls are the returned controls + Controls []Control +} + +// Print outputs a human-readable description +func (s *SearchResult) Print() { + for _, entry := range s.Entries { + entry.Print() + } +} + +// PrettyPrint outputs a human-readable description with indenting +func (s *SearchResult) 
PrettyPrint(indent int) { + for _, entry := range s.Entries { + entry.PrettyPrint(indent) + } +} + +// SearchRequest represents a search request to send to the server +type SearchRequest struct { + BaseDN string + Scope int + DerefAliases int + SizeLimit int + TimeLimit int + TypesOnly bool + Filter string + Attributes []string + Controls []Control +} + +func (s *SearchRequest) encode() (*ber.Packet, error) { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, s.BaseDN, "Base DN")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.Scope), "Scope")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.DerefAliases), "Deref Aliases")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.SizeLimit), "Size Limit")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.TimeLimit), "Time Limit")) + request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, s.TypesOnly, "Types Only")) + // compile and encode filter + filterPacket, err := CompileFilter(s.Filter) + if err != nil { + return nil, err + } + request.AppendChild(filterPacket) + // encode attributes + attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") + for _, attribute := range s.Attributes { + attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) + } + request.AppendChild(attributesPacket) + return request, nil +} + +// NewSearchRequest creates a new search request +func NewSearchRequest( + BaseDN string, + Scope, DerefAliases, SizeLimit, TimeLimit int, + TypesOnly bool, + Filter string, + Attributes 
[]string, + Controls []Control, +) *SearchRequest { + return &SearchRequest{ + BaseDN: BaseDN, + Scope: Scope, + DerefAliases: DerefAliases, + SizeLimit: SizeLimit, + TimeLimit: TimeLimit, + TypesOnly: TypesOnly, + Filter: Filter, + Attributes: Attributes, + Controls: Controls, + } +} + +// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the +// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically. +// The following four cases are possible given the arguments: +// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size +// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries +// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request +// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries +// A requested pagingSize of 0 is interpreted as no limit by LDAP servers. 
+func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) { + var pagingControl *ControlPaging + + control := FindControl(searchRequest.Controls, ControlTypePaging) + if control == nil { + pagingControl = NewControlPaging(pagingSize) + searchRequest.Controls = append(searchRequest.Controls, pagingControl) + } else { + castControl, ok := control.(*ControlPaging) + if !ok { + return nil, fmt.Errorf("expected paging control to be of type *ControlPaging, got %v", control) + } + if castControl.PagingSize != pagingSize { + return nil, fmt.Errorf("paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize) + } + pagingControl = castControl + } + + searchResult := new(SearchResult) + for { + result, err := l.Search(searchRequest) + l.Debug.Printf("Looking for Paging Control...") + if err != nil { + return searchResult, err + } + if result == nil { + return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received")) + } + + for _, entry := range result.Entries { + searchResult.Entries = append(searchResult.Entries, entry) + } + for _, referral := range result.Referrals { + searchResult.Referrals = append(searchResult.Referrals, referral) + } + for _, control := range result.Controls { + searchResult.Controls = append(searchResult.Controls, control) + } + + l.Debug.Printf("Looking for Paging Control...") + pagingResult := FindControl(result.Controls, ControlTypePaging) + if pagingResult == nil { + pagingControl = nil + l.Debug.Printf("Could not find paging control. Breaking...") + break + } + + cookie := pagingResult.(*ControlPaging).Cookie + if len(cookie) == 0 { + pagingControl = nil + l.Debug.Printf("Could not find cookie. 
Breaking...") + break + } + pagingControl.SetCookie(cookie) + } + + if pagingControl != nil { + l.Debug.Printf("Abandoning Paging...") + pagingControl.PagingSize = 0 + l.Search(searchRequest) + } + + return searchResult, nil +} + +// Search performs the given search request +func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + // encode search request + encodedSearchRequest, err := searchRequest.encode() + if err != nil { + return nil, err + } + packet.AppendChild(encodedSearchRequest) + // encode search controls + if len(searchRequest.Controls) > 0 { + packet.AppendChild(encodeControls(searchRequest.Controls)) + } + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + result := &SearchResult{ + Entries: make([]*Entry, 0), + Referrals: make([]string, 0), + Controls: make([]Control, 0)} + + foundSearchResultDone := false + for !foundSearchResultDone { + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + + switch packet.Children[1].Tag { + case 4: + entry := new(Entry) + entry.DN = packet.Children[1].Children[0].Value.(string) + for _, child := range packet.Children[1].Children[1].Children { + attr := new(EntryAttribute) + attr.Name = child.Children[0].Value.(string) + for _, value := range child.Children[1].Children { + 
attr.Values = append(attr.Values, value.Value.(string)) + attr.ByteValues = append(attr.ByteValues, value.ByteValue) + } + entry.Attributes = append(entry.Attributes, attr) + } + result.Entries = append(result.Entries, entry) + case 5: + err := GetLDAPError(packet) + if err != nil { + return nil, err + } + if len(packet.Children) == 3 { + for _, child := range packet.Children[2].Children { + decodedChild, err := DecodeControl(child) + if err != nil { + return nil, fmt.Errorf("failed to decode child control: %s", err) + } + result.Controls = append(result.Controls, decodedChild) + } + } + foundSearchResultDone = true + case 19: + result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string)) + } + } + l.Debug.Printf("%d: returning", msgCtx.id) + return result, nil +} diff --git a/vendor/github.com/go-logr/zapr/Gopkg.lock b/vendor/github.com/go-logr/zapr/Gopkg.lock index 4e0d08eca..8da0a8f76 100644 --- a/vendor/github.com/go-logr/zapr/Gopkg.lock +++ b/vendor/github.com/go-logr/zapr/Gopkg.lock @@ -2,24 +2,31 @@ [[projects]] - branch = "master" + digest = "1:edd2fa4578eb086265db78a9201d15e76b298dfd0d5c379da83e9c61712cf6df" name = "github.com/go-logr/logr" packages = ["."] + pruneopts = "UT" revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" + version = "v0.1.0" [[projects]] + digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" name = "go.uber.org/atomic" packages = ["."] + pruneopts = "UT" revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" version = "v1.3.2" [[projects]] + digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" name = "go.uber.org/multierr" packages = ["."] + pruneopts = "UT" revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" version = "v1.1.0" [[projects]] + digest = "1:9580b1b079114140ade8cec957685344d14f00119e0241f6b369633cb346eeb3" name = "go.uber.org/zap" packages = [ ".", @@ -27,14 +34,19 @@ "internal/bufferpool", "internal/color", "internal/exit", - "zapcore" + 
"zapcore", ] + pruneopts = "UT" revision = "eeedf312bc6c57391d84767a4cd413f02a917974" version = "v1.8.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "9b4b2f75bc457ddc6ebb276c32fc8e30525b6133ee76886c804ba0a6b815abc2" + input-imports = [ + "github.com/go-logr/logr", + "go.uber.org/zap", + "go.uber.org/zap/zapcore", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/go-logr/zapr/Gopkg.toml b/vendor/github.com/go-logr/zapr/Gopkg.toml index 78944774c..ae475d72e 100644 --- a/vendor/github.com/go-logr/zapr/Gopkg.toml +++ b/vendor/github.com/go-logr/zapr/Gopkg.toml @@ -26,8 +26,8 @@ [[constraint]] - branch = "master" name = "github.com/go-logr/logr" + version = "0.1.0" [[constraint]] name = "go.uber.org/zap" diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml index 2ee3ab975..3436c4590 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.travis.yml @@ -1,15 +1,15 @@ -language: go +after_success: +- bash <(curl -s https://codecov.io/bash) go: -- "1.8" -- "1.9" -- "1.10" +- '1.9' +- 1.10.x +- 1.11.x install: - go get -u github.com/stretchr/testify/assert - go get -u github.com/go-openapi/swag -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
-after_success: -- bash <(curl -s https://codecov.io/bash) +language: go notifications: slack: secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonpointer/go.mod b/vendor/github.com/go-openapi/jsonpointer/go.mod new file mode 100644 index 000000000..eb4d623c5 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/go.mod @@ -0,0 +1,10 @@ +module github.com/go-openapi/jsonpointer + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-openapi/swag v0.17.0 + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + gopkg.in/yaml.v2 v2.2.1 // indirect +) diff --git a/vendor/github.com/go-openapi/jsonpointer/go.sum b/vendor/github.com/go-openapi/jsonpointer/go.sum new file mode 100644 index 000000000..c71f4d7a2 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= +github.com/go-openapi/swag 
v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml index 7a261a651..40034d28d 100644 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ b/vendor/github.com/go-openapi/jsonreference/.travis.yml @@ -1,16 +1,16 @@ -language: go +after_success: +- bash <(curl -s https://codecov.io/bash) go: -- "1.8" -- "1.9" -- "1.10" +- '1.9' +- 1.10.x +- 1.11.x install: - go get -u github.com/stretchr/testify/assert - go get -u github.com/PuerkitoBio/purell - go get -u github.com/go-openapi/jsonpointer -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
-after_success: -- bash <(curl -s https://codecov.io/bash) +language: go notifications: slack: secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/go.mod b/vendor/github.com/go-openapi/jsonreference/go.mod new file mode 100644 index 000000000..6d15a7050 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/go.mod @@ -0,0 +1,15 @@ +module github.com/go-openapi/jsonreference + +require ( + github.com/PuerkitoBio/purell v1.1.0 + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-openapi/jsonpointer v0.17.0 + github.com/go-openapi/swag v0.17.0 // indirect + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect + golang.org/x/text v0.3.0 // indirect + gopkg.in/yaml.v2 v2.2.1 // indirect +) diff --git a/vendor/github.com/go-openapi/jsonreference/go.sum b/vendor/github.com/go-openapi/jsonreference/go.sum new file mode 100644 index 000000000..ec9bdbc28 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/go.sum @@ -0,0 +1,20 
@@ +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= +github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml 
b/vendor/github.com/go-openapi/spec/.golangci.yml index 14721a25d..48c9813ff 100644 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -18,3 +18,6 @@ linters: disable: - maligned - unparam + - lll + - gochecknoinits + - gochecknoglobals diff --git a/vendor/github.com/go-openapi/spec/.travis.yml b/vendor/github.com/go-openapi/spec/.travis.yml index 390aa8619..a4f03484b 100644 --- a/vendor/github.com/go-openapi/spec/.travis.yml +++ b/vendor/github.com/go-openapi/spec/.travis.yml @@ -1,18 +1,18 @@ -language: go +after_success: +- bash <(curl -s https://codecov.io/bash) go: -- 1.7 -- 1.8 -- 1.9 +- '1.9' +- 1.10.x +- 1.11.x install: - go get -u github.com/stretchr/testify - go get -u github.com/go-openapi/swag - go get -u gopkg.in/yaml.v2 - go get -u github.com/go-openapi/jsonpointer - go get -u github.com/go-openapi/jsonreference -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... -after_success: -- bash <(curl -s https://codecov.io/bash) +language: go notifications: slack: secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index e3b1ce9a8..456a9dd7e 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -32,9 +32,10 @@ import ( // ExpandOptions provides options for expand. type ExpandOptions struct { - RelativeBase string - SkipSchemas bool - ContinueOnError bool + RelativeBase string + SkipSchemas bool + ContinueOnError bool + AbsoluteCircularRef bool } // ResolutionCache a cache for resolving urls @@ -444,7 +445,7 @@ func normalizeFileRef(ref *Ref, relativeBase string) *Ref { return &r } - debugLog("normalizing %s against %s (%s)", ref.String(), relativeBase, ref.GetURL().String()) + debugLog("normalizing %s against %s", ref.String(), relativeBase) s := normalizePaths(ref.String(), relativeBase) r, _ := NewRef(s) @@ -607,25 +608,35 @@ func shouldStopOnError(err error, opts *ExpandOptions) bool { return false } -// ExpandSchema expands the refs in the schema object with reference to the root object -// go-openapi/validate uses this function -// notice that it is impossible to reference a json scema in a different file other than root -func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { - // Only save the root to a tmp file if it isn't nil. 
- var base string +// baseForRoot loads in the cache the root document and produces a fake "root" base path entry +// for further $ref resolution +func baseForRoot(root interface{}, cache ResolutionCache) string { + // cache the root document to resolve $ref's + const rootBase = "root" if root != nil { - base, _ = absPath("root") + base, _ := absPath(rootBase) + normalizedBase := normalizeAbsPath(base) + debugLog("setting root doc in cache at: %s", normalizedBase) if cache == nil { cache = resCache } - cache.Set(normalizeAbsPath(base), root) - base = "root" + cache.Set(normalizedBase, root) + return rootBase } + return "" +} +// ExpandSchema expands the refs in the schema object with reference to the root object +// go-openapi/validate uses this function +// notice that it is impossible to reference a json schema in a different file other than root +func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { opts := &ExpandOptions{ - RelativeBase: base, + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), SkipSchemas: false, ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, } return ExpandSchemaWithBasePath(schema, cache, opts) } @@ -734,6 +745,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba otherwise the basePath should inherit the parent's */ // important: ID can be relative path if target.ID != "" { + debugLog("schema has ID: %s", target.ID) // handling the case when id is a folder // remember that basePath has to be a file refPath := target.ID @@ -757,11 +769,13 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba // this means there is a cycle in the recursion tree: return the Ref // - circular refs cannot be expanded. We leave them as ref. 
// - denormalization means that a new local file ref is set relative to the original basePath - debugLog("shortcut circular ref") - debugLog("basePath: %s", basePath) - debugLog("normalized basePath: %s", normalizedBasePath) - debugLog("normalized ref: %s", normalizedRef.String()) - target.Ref = *denormalizeFileRef(normalizedRef, normalizedBasePath, resolver.context.basePath) + debugLog("shortcut circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", + basePath, normalizedBasePath, normalizedRef.String()) + if !resolver.options.AbsoluteCircularRef { + target.Ref = *denormalizeFileRef(normalizedRef, normalizedBasePath, resolver.context.basePath) + } else { + target.Ref = *normalizedRef + } return &target, nil } @@ -1015,19 +1029,40 @@ func transitiveResolver(basePath string, ref Ref, resolver *schemaLoader) (*sche return resolver, nil } +// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document +func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, + } + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return err + } + + return expandResponse(response, resolver, opts.RelativeBase) +} + // ExpandResponse expands a response based on a basepath // This is the exported version of expandResponse // all refs inside response will be resolved relative to basePath func ExpandResponse(response *Response, basePath string) error { + var specBasePath string + if basePath != "" { + specBasePath, _ = absPath(basePath) + } opts := &ExpandOptions{ - RelativeBase: basePath, + RelativeBase: specBasePath, } resolver, err := defaultSchemaLoader(nil, opts, nil, nil) if err != nil { return err } - return 
expandResponse(response, resolver, basePath) + return expandResponse(response, resolver, opts.RelativeBase) } func derefResponse(response *Response, parentRefs []string, resolver *schemaLoader, basePath string) error { @@ -1058,7 +1093,6 @@ func expandResponse(response *Response, resolver *schemaLoader, basePath string) if response == nil { return nil } - parentRefs := []string{} if err := derefResponse(response, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { return err @@ -1094,19 +1128,40 @@ func expandResponse(response *Response, resolver *schemaLoader, basePath string) return nil } +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document +func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, + } + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return err + } + + return expandParameter(parameter, resolver, opts.RelativeBase) +} + // ExpandParameter expands a parameter based on a basepath // This is the exported version of expandParameter // all refs inside parameter will be resolved relative to basePath func ExpandParameter(parameter *Parameter, basePath string) error { + var specBasePath string + if basePath != "" { + specBasePath, _ = absPath(basePath) + } opts := &ExpandOptions{ - RelativeBase: basePath, + RelativeBase: specBasePath, } resolver, err := defaultSchemaLoader(nil, opts, nil, nil) if err != nil { return err } - return expandParameter(parameter, resolver, basePath) + return expandParameter(parameter, resolver, opts.RelativeBase) } func derefParameter(parameter *Parameter, parentRefs []string, resolver *schemaLoader, basePath string) error { diff --git 
a/vendor/github.com/go-openapi/spec/go.mod b/vendor/github.com/go-openapi/spec/go.mod new file mode 100644 index 000000000..5af64c10b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/go.mod @@ -0,0 +1,16 @@ +module github.com/go-openapi/spec + +require ( + github.com/PuerkitoBio/purell v1.1.0 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-openapi/jsonpointer v0.17.0 + github.com/go-openapi/jsonreference v0.17.0 + github.com/go-openapi/swag v0.17.0 + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect + golang.org/x/text v0.3.0 // indirect + gopkg.in/yaml.v2 v2.2.1 +) diff --git a/vendor/github.com/go-openapi/spec/go.sum b/vendor/github.com/go-openapi/spec/go.sum new file mode 100644 index 000000000..ab6bfb608 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/go.sum @@ -0,0 +1,22 @@ +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.17.0 h1:d/o7/fsLWWQZACbihvZxcyLQ59jfUVs7WOJv/ak7T7A= +github.com/go-openapi/jsonreference 
v0.17.0/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= +github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go index 32f7d8fe7..344e31776 100644 --- a/vendor/github.com/go-openapi/spec/operation.go +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -15,12 +15,20 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) +func init() { + //gob.Register(map[string][]interface{}{}) + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + // OperationProps describes an operation type OperationProps struct { Description string `json:"description,omitempty"` @@ -257,3 +265,126 @@ func (o *Operation) RespondsWith(code int, response *Response) *Operation { 
o.Responses.StatusCodeResponses[code] = *response return o } + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob 
decoder for Operation, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go index 1405bfd8e..08ff869b2 100644 --- a/vendor/github.com/go-openapi/spec/ref.go +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -15,6 +15,8 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" "net/http" "os" @@ -148,6 +150,28 @@ func (r *Ref) UnmarshalJSON(d []byte) error { return r.fromMap(v) } +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + func (r *Ref) fromMap(v map[string]interface{}) error { if v == nil { return nil diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml 
b/vendor/github.com/go-openapi/swag/.golangci.yml index 4029779e1..625c3d6af 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -10,10 +10,13 @@ linters-settings: dupl: threshold: 100 goconst: - min-len: 2 + min-len: 3 min-occurrences: 2 linters: enable-all: true disable: - maligned + - lll + - gochecknoinits + - gochecknoglobals diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml index c41437b7a..bd3a2e527 100644 --- a/vendor/github.com/go-openapi/swag/.travis.yml +++ b/vendor/github.com/go-openapi/swag/.travis.yml @@ -1,16 +1,16 @@ -language: go +after_success: +- bash <(curl -s https://codecov.io/bash) go: -- 1.8 -- 1.9.x +- '1.9' - 1.10.x +- 1.11.x install: - go get -u github.com/stretchr/testify - go get -u github.com/mailru/easyjson - go get -u gopkg.in/yaml.v2 -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... -after_success: -- bash <(curl -s https://codecov.io/bash) +language: go notifications: slack: secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index 90f5c9b7c..459a3e18d 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -5,11 +5,19 @@ [![GolangCI](https://golangci.com/badges/github.com/go-openapi/swag.svg)](https://golangci.com) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) -Contains a bunch of helper functions: +Contains a bunch of helper functions for go-openapi and go-swagger projects. -* convert between value and pointers for builtins -* convert from string to builtin +You may also use it standalone for your projects. + +* convert between value and pointers for builtin types +* convert from string to builtin types (wraps strconv) * fast json concatenation * search in path * load from file or http * name mangling + + +This repo has only few dependencies outside of the standard library: + +* JSON utilities depend on github.com/mailru/easyjson +* YAML utilities depend on gopkg.in/yaml.v2 diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go index ba269122a..4e446ff70 100644 --- a/vendor/github.com/go-openapi/swag/convert.go +++ b/vendor/github.com/go-openapi/swag/convert.go @@ -22,8 +22,9 @@ import ( // same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER const ( - maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + epsilon float64 = 1e-9 ) // IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive @@ -31,21 +32,39 @@ func IsFloat64AJSONInteger(f float64) bool { if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { return false } + fa := 
math.Abs(f) + g := float64(uint64(f)) + ga := math.Abs(g) - return f == float64(int64(f)) || f == float64(uint64(f)) + diff := math.Abs(f - g) + + // more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases + if f == g { // best case + return true + } else if f == float64(int64(f)) || f == float64(uint64(f)) { // optimistic case + return true + } else if f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64 { // very close to 0 values + return diff < (epsilon * math.SmallestNonzeroFloat64) + } + // check the relative error + return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon } -var evaluatesAsTrue = map[string]struct{}{ - "true": {}, - "1": {}, - "yes": {}, - "ok": {}, - "y": {}, - "on": {}, - "selected": {}, - "checked": {}, - "t": {}, - "enabled": {}, +var evaluatesAsTrue map[string]struct{} + +func init() { + evaluatesAsTrue = map[string]struct{}{ + "true": {}, + "1": {}, + "yes": {}, + "ok": {}, + "y": {}, + "on": {}, + "selected": {}, + "checked": {}, + "t": {}, + "enabled": {}, + } } // ConvertBool turn a string into a boolean diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go new file mode 100644 index 000000000..e01e1a023 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/* +Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. + +You may also use it standalone for your projects. + + * convert between value and pointers for builtin types + * convert from string to builtin types (wraps strconv) + * fast json concatenation + * search in path + * load from file or http + * name mangling + + +This repo has only few dependencies outside of the standard library: + + * JSON utilities depend on github.com/mailru/easyjson + * YAML utilities depend on gopkg.in/yaml.v2 +*/ +package swag diff --git a/vendor/github.com/go-openapi/swag/go.mod b/vendor/github.com/go-openapi/swag/go.mod new file mode 100644 index 000000000..9eb936a19 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.mod @@ -0,0 +1,9 @@ +module github.com/go-openapi/swag + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + gopkg.in/yaml.v2 v2.2.1 +) diff --git a/vendor/github.com/go-openapi/swag/go.sum b/vendor/github.com/go-openapi/swag/go.sum new file mode 100644 index 000000000..d6e717bd4 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.sum @@ -0,0 +1,9 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go index 60e55ecd3..33da5e4e7 100644 --- a/vendor/github.com/go-openapi/swag/json.go +++ b/vendor/github.com/go-openapi/swag/json.go @@ -21,7 +21,6 @@ import ( "reflect" "strings" "sync" - "sync/atomic" "github.com/mailru/easyjson/jlexer" "github.com/mailru/easyjson/jwriter" @@ -35,14 +34,13 @@ var DefaultJSONNameProvider = NewNameProvider() const comma = byte(',') -var atomicClosers atomic.Value +var closers map[byte]byte func init() { - atomicClosers.Store( - map[byte]byte{ - '{': '}', - '[': ']', - }) + closers = map[byte]byte{ + '{': '}', + '[': ']', + } } type ejMarshaler interface { @@ -113,7 +111,6 @@ func ConcatJSON(blobs ...[]byte) []byte { var opening, closing byte var idx, a int buf := bytes.NewBuffer(nil) - closers := atomicClosers.Load().(map[byte]byte) for i, b := range blobs[:last+1] { if b == nil || bytes.Equal(b, nullJSON) { @@ -264,7 +261,7 @@ func (n *NameProvider) GetJSONNames(subject interface{}) []string { names = n.makeNameIndex(tpe) } - var res []string + res := make([]string, 0, len(names.jsonNames)) for k := range names.jsonNames { res = append(res, k) } diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go index 8323fa37b..821235f84 100644 --- a/vendor/github.com/go-openapi/swag/net.go +++ b/vendor/github.com/go-openapi/swag/net.go @@ -1,3 +1,17 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package swag import ( diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go index ef48086db..c2e686d31 100644 --- a/vendor/github.com/go-openapi/swag/post_go18.go +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -1,3 +1,17 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // +build go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go index 567680c79..9db23c095 100644 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ b/vendor/github.com/go-openapi/swag/post_go19.go @@ -1,3 +1,17 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // +build go1.9 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go index 860bb2bbb..6607f3393 100644 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -1,3 +1,17 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // +build !go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go index 72c48ae75..9c96ab90f 100644 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ b/vendor/github.com/go-openapi/swag/pre_go19.go @@ -1,3 +1,17 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // +build !go1.9 package swag diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 6eba19c5c..164c96762 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -33,6 +33,12 @@ var once sync.Once var isInitialism func(string) bool +var ( + splitRex1 *regexp.Regexp + splitRex2 *regexp.Regexp + splitReplacer *strings.Replacer +) + func init() { // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 var configuredInitialisms = map[string]bool{ @@ -88,7 +94,15 @@ func ensureSorted() { initialisms = commonInitialisms.sorted() } -// JoinByFormat joins a string array by a known format: +const ( + //collectionFormatComma = "csv" + collectionFormatSpace = "ssv" + collectionFormatTab = "tsv" + collectionFormatPipe = "pipes" + collectionFormatMulti = "multi" +) + +// JoinByFormat joins a string array by a known format (e.g. 
swagger's collectionFormat attribute): // ssv: space separated value // tsv: tab separated value // pipes: pipe (|) separated value @@ -99,13 +113,13 @@ func JoinByFormat(data []string, format string) []string { } var sep string switch format { - case "ssv": + case collectionFormatSpace: sep = " " - case "tsv": + case collectionFormatTab: sep = "\t" - case "pipes": + case collectionFormatPipe: sep = "|" - case "multi": + case collectionFormatMulti: return data default: sep = "," @@ -118,19 +132,20 @@ func JoinByFormat(data []string, format string) []string { // tsv: tab separated value // pipes: pipe (|) separated value // csv: comma separated value (default) +// func SplitByFormat(data, format string) []string { if data == "" { return nil } var sep string switch format { - case "ssv": + case collectionFormatSpace: sep = " " - case "tsv": + case collectionFormatTab: sep = "\t" - case "pipes": + case collectionFormatPipe: sep = "|" - case "multi": + case collectionFormatMulti: return nil default: sep = "," @@ -157,37 +172,37 @@ func (s byLength) Less(i, j int) bool { } // Prepares strings by splitting by caps, spaces, dashes, and underscore -func split(str string) (words []string) { - repl := strings.NewReplacer( - "@", "At ", - "&", "And ", - "|", "Pipe ", - "$", "Dollar ", - "!", "Bang ", - "-", " ", - "_", " ", - ) - - rex1 := regexp.MustCompile(`(\p{Lu})`) - rex2 := regexp.MustCompile(`(\pL|\pM|\pN|\p{Pc})+`) +func split(str string) []string { + // check if consecutive single char things make up an initialism + once.Do(func() { + splitRex1 = regexp.MustCompile(`(\p{Lu})`) + splitRex2 = regexp.MustCompile(`(\pL|\pM|\pN|\p{Pc})+`) + splitReplacer = strings.NewReplacer( + "@", "At ", + "&", "And ", + "|", "Pipe ", + "$", "Dollar ", + "!", "Bang ", + "-", " ", + "_", " ", + ) + ensureSorted() + }) str = trim(str) // Convert dash and underscore to spaces - str = repl.Replace(str) + str = splitReplacer.Replace(str) // Split when uppercase is found (needed for Snake) - 
str = rex1.ReplaceAllString(str, " $1") + str = splitRex1.ReplaceAllString(str, " $1") - // check if consecutive single char things make up an initialism - once.Do(ensureSorted) for _, k := range initialisms { - str = strings.Replace(str, rex1.ReplaceAllString(k, " $1"), " "+k, -1) + str = strings.Replace(str, splitRex1.ReplaceAllString(k, " $1"), " "+k, -1) } // Get the final list of words - words = rex2.FindAllString(str, -1) - - return + //words = rex2.FindAllString(str, -1) + return splitRex2.FindAllString(str, -1) } // Removes leading whitespaces @@ -219,9 +234,10 @@ func Camelize(word string) (camelized string) { // ToFileName lowercases and underscores a go type name func ToFileName(name string) string { - var out []string + in := split(name) + out := make([]string, 0, len(in)) - for _, w := range split(name) { + for _, w := range in { out = append(out, lower(w)) } @@ -230,8 +246,10 @@ func ToFileName(name string) string { // ToCommandName lowercases and underscores a go type name func ToCommandName(name string) string { - var out []string - for _, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { out = append(out, lower(w)) } return strings.Join(out, "-") @@ -239,8 +257,10 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - var out []string - for _, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { if !isInitialism(upper(w)) { out = append(out, lower(w)) } else { @@ -252,8 +272,10 @@ func ToHumanNameLower(name string) string { // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - var out []string - for _, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { uw := upper(w) if 
!isInitialism(uw) { out = append(out, upper(w[:1])+lower(w[1:])) @@ -266,8 +288,10 @@ func ToHumanNameTitle(name string) string { // ToJSONName camelcases a name which can be underscored or pascal cased func ToJSONName(name string) string { - var out []string - for i, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for i, w := range in { if i == 0 { out = append(out, lower(w)) continue @@ -291,8 +315,10 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - var out []string - for _, w := range split(name) { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { uw := upper(w) mod := int(math.Min(float64(len(uw)), 2)) if !isInitialism(uw) && !isInitialism(uw[:len(uw)-mod]) { @@ -314,6 +340,16 @@ func ToGoName(name string) string { return result } +// ContainsStrings searches a slice of strings for a case-sensitive match +func ContainsStrings(coll []string, item string) bool { + for _, a := range coll { + if a == item { + return true + } + } + return false +} + // ContainsStringsCI searches a slice of strings for a case-insensitive match func ContainsStringsCI(coll []string, item string) bool { for _, a := range coll { diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index f458c81a8..435e2948e 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -22,7 +22,6 @@ import ( "github.com/mailru/easyjson/jlexer" "github.com/mailru/easyjson/jwriter" - yaml "gopkg.in/yaml.v2" ) diff --git a/vendor/github.com/go-redis/redis/.gitignore b/vendor/github.com/go-redis/redis/.gitignore new file mode 100644 index 000000000..ebfe903bc --- /dev/null +++ b/vendor/github.com/go-redis/redis/.gitignore @@ -0,0 +1,2 @@ +*.rdb +testdata/*/ diff --git a/vendor/github.com/go-redis/redis/.travis.yml 
b/vendor/github.com/go-redis/redis/.travis.yml new file mode 100644 index 000000000..6b110b4cb --- /dev/null +++ b/vendor/github.com/go-redis/redis/.travis.yml @@ -0,0 +1,19 @@ +sudo: false +language: go + +services: + - redis-server + +go: + - 1.9.x + - 1.10.x + - 1.11.x + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get github.com/onsi/ginkgo + - go get github.com/onsi/gomega diff --git a/vendor/github.com/go-redis/redis/CHANGELOG.md b/vendor/github.com/go-redis/redis/CHANGELOG.md new file mode 100644 index 000000000..19645661a --- /dev/null +++ b/vendor/github.com/go-redis/redis/CHANGELOG.md @@ -0,0 +1,25 @@ +# Changelog + +## Unreleased + +- Cluster and Ring pipelines process commands for each node in its own goroutine. + +## 6.14 + +- Added Options.MinIdleConns. +- Added Options.MaxConnAge. +- PoolStats.FreeConns is renamed to PoolStats.IdleConns. +- Add Client.Do to simplify creating custom commands. +- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. +- Lower memory usage. + +## v6.13 + +- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better keys distribution between shards. +- Cluster client was optimized to use much less memory when reloading cluster state. +- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout occurres. In most cases it is recommended to use PubSub.Channel instead. +- Dialer.KeepAlive is set to 5 minutes by default. + +## v6.12 + +- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis Servers that don't have cluster mode enabled. 
See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup diff --git a/vendor/github.com/go-redis/redis/LICENSE b/vendor/github.com/go-redis/redis/LICENSE new file mode 100644 index 000000000..298bed9be --- /dev/null +++ b/vendor/github.com/go-redis/redis/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/go-redis/redis Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-redis/redis/Makefile b/vendor/github.com/go-redis/redis/Makefile new file mode 100644 index 000000000..fa3b4e004 --- /dev/null +++ b/vendor/github.com/go-redis/redis/Makefile @@ -0,0 +1,22 @@ +all: testdeps + go test ./... + go test ./... -short -race + env GOOS=linux GOARCH=386 go test ./... 
+ go vet + go get github.com/gordonklaus/ineffassign + ineffassign . + +testdeps: testdata/redis/src/redis-server + +bench: testdeps + go test ./... -test.run=NONE -test.bench=. -test.benchmem + +.PHONY: all test testdeps bench + +testdata/redis: + mkdir -p $@ + wget -qO- https://github.com/antirez/redis/archive/5.0.tar.gz | tar xvz --strip-components=1 -C $@ + +testdata/redis/src/redis-server: testdata/redis + sed -i.bak 's/libjemalloc.a/libjemalloc.a -lrt/g' $ +} + +func ExampleClient() { + err := client.Set("key", "value", 0).Err() + if err != nil { + panic(err) + } + + val, err := client.Get("key").Result() + if err != nil { + panic(err) + } + fmt.Println("key", val) + + val2, err := client.Get("key2").Result() + if err == redis.Nil { + fmt.Println("key2 does not exist") + } else if err != nil { + panic(err) + } else { + fmt.Println("key2", val2) + } + // Output: key value + // key2 does not exist +} +``` + +## Howto + +Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package. 
+ +## Look and feel + +Some corner cases: + +```go +// SET key value EX 10 NX +set, err := client.SetNX("key", "value", 10*time.Second).Result() + +// SORT list LIMIT 0 2 ASC +vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() + +// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 +vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{ + Min: "-inf", + Max: "+inf", + Offset: 0, + Count: 2, +}).Result() + +// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM +vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result() + +// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" +vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() +``` + +## Benchmark + +go-redis vs redigo: + +``` +BenchmarkSetGoRedis10Conns64Bytes-4 200000 7621 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns64Bytes-4 200000 7554 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns1KB-4 200000 7697 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns1KB-4 200000 7688 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns10KB-4 200000 9214 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns10KB-4 200000 9181 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns1MB-4 2000 583242 ns/op 2337 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns1MB-4 2000 583089 ns/op 2338 B/op 6 allocs/op +BenchmarkSetRedigo10Conns64Bytes-4 200000 7576 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns64Bytes-4 200000 7782 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns1KB-4 200000 7958 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns1KB-4 200000 7725 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns10KB-4 100000 18442 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns10KB-4 100000 18818 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns1MB-4 2000 668829 ns/op 226 B/op 7 allocs/op +BenchmarkSetRedigo100Conns1MB-4 2000 679542 ns/op 226 B/op 7 allocs/op +``` 
+ +Redis Cluster: + +``` +BenchmarkRedisPing-4 200000 6983 ns/op 116 B/op 4 allocs/op +BenchmarkRedisClusterPing-4 100000 11535 ns/op 117 B/op 4 allocs/op +``` + +## See also + +- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) +- [Golang msgpack](https://github.com/vmihailenco/msgpack) +- [Golang message task queue](https://github.com/go-msgqueue/msgqueue) diff --git a/vendor/github.com/go-redis/redis/cluster.go b/vendor/github.com/go-redis/redis/cluster.go new file mode 100644 index 000000000..0cecc62c4 --- /dev/null +++ b/vendor/github.com/go-redis/redis/cluster.go @@ -0,0 +1,1621 @@ +package redis + +import ( + "context" + "crypto/tls" + "fmt" + "math" + "math/rand" + "net" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/go-redis/redis/internal" + "github.com/go-redis/redis/internal/hashtag" + "github.com/go-redis/redis/internal/pool" + "github.com/go-redis/redis/internal/proto" +) + +var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") + +// ClusterOptions are used to configure a cluster client and should be +// passed to NewClusterClient. +type ClusterOptions struct { + // A seed list of host:port addresses of cluster nodes. + Addrs []string + + // The maximum number of retries before giving up. Command is retried + // on network errors and MOVED/ASK redirects. + // Default is 8 retries. + MaxRedirects int + + // Enables read-only commands on slave nodes. + ReadOnly bool + // Allows routing read-only commands to the closest master or slave node. + // It automatically enables ReadOnly. + RouteByLatency bool + // Allows routing read-only commands to the random master or slave node. + // It automatically enables ReadOnly. + RouteRandomly bool + + // Optional function that returns cluster slots information. + // It is useful to manually create cluster of standalone Redis servers + // and load-balance read/write operations between master and slaves. 
+ // It can use service like ZooKeeper to maintain configuration information + // and Cluster.ReloadState to manually trigger state reloading. + ClusterSlots func() ([]ClusterSlot, error) + + // Optional hook that is called when a new node is created. + OnNewNode func(*Client) + + // Following options are copied from Options struct. + + OnConnect func(*Conn) error + + Password string + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolSize applies per cluster node and not for the whole cluster. + PoolSize int + MinIdleConns int + MaxConnAge time.Duration + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config +} + +func (opt *ClusterOptions) init() { + if opt.MaxRedirects == -1 { + opt.MaxRedirects = 0 + } else if opt.MaxRedirects == 0 { + opt.MaxRedirects = 8 + } + + if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil { + opt.ReadOnly = true + } + + if opt.PoolSize == 0 { + opt.PoolSize = 5 * runtime.NumCPU() + } + + switch opt.ReadTimeout { + case -1: + opt.ReadTimeout = 0 + case 0: + opt.ReadTimeout = 3 * time.Second + } + switch opt.WriteTimeout { + case -1: + opt.WriteTimeout = 0 + case 0: + opt.WriteTimeout = opt.ReadTimeout + } + + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } +} + +func (opt *ClusterOptions) clientOptions() *Options { + const disableIdleCheck = -1 + + return &Options{ + OnConnect: opt.OnConnect, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + Password: opt.Password, + readOnly: opt.ReadOnly, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + 
WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.PoolSize, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: disableIdleCheck, + + TLSConfig: opt.TLSConfig, + } +} + +//------------------------------------------------------------------------------ + +type clusterNode struct { + Client *Client + + latency uint32 // atomic + generation uint32 // atomic + loading uint32 // atomic +} + +func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { + opt := clOpt.clientOptions() + opt.Addr = addr + node := clusterNode{ + Client: NewClient(opt), + } + + node.latency = math.MaxUint32 + if clOpt.RouteByLatency { + go node.updateLatency() + } + + if clOpt.OnNewNode != nil { + clOpt.OnNewNode(node.Client) + } + + return &node +} + +func (n *clusterNode) String() string { + return n.Client.String() +} + +func (n *clusterNode) Close() error { + return n.Client.Close() +} + +func (n *clusterNode) updateLatency() { + const probes = 10 + + var latency uint32 + for i := 0; i < probes; i++ { + start := time.Now() + n.Client.Ping() + probe := uint32(time.Since(start) / time.Microsecond) + latency = (latency + probe) / 2 + } + atomic.StoreUint32(&n.latency, latency) +} + +func (n *clusterNode) Latency() time.Duration { + latency := atomic.LoadUint32(&n.latency) + return time.Duration(latency) * time.Microsecond +} + +func (n *clusterNode) MarkAsLoading() { + atomic.StoreUint32(&n.loading, uint32(time.Now().Unix())) +} + +func (n *clusterNode) Loading() bool { + const minute = int64(time.Minute / time.Second) + + loading := atomic.LoadUint32(&n.loading) + if loading == 0 { + return false + } + if time.Now().Unix()-int64(loading) < minute { + return true + } + atomic.StoreUint32(&n.loading, 0) + return false +} + +func (n *clusterNode) Generation() uint32 { + return atomic.LoadUint32(&n.generation) +} + +func (n *clusterNode) SetGeneration(gen uint32) { + for { + v := 
atomic.LoadUint32(&n.generation) + if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { + break + } + } +} + +//------------------------------------------------------------------------------ + +type clusterNodes struct { + opt *ClusterOptions + + mu sync.RWMutex + allAddrs []string + allNodes map[string]*clusterNode + clusterAddrs []string + closed bool + + _generation uint32 // atomic +} + +func newClusterNodes(opt *ClusterOptions) *clusterNodes { + return &clusterNodes{ + opt: opt, + + allAddrs: opt.Addrs, + allNodes: make(map[string]*clusterNode), + } +} + +func (c *clusterNodes) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, node := range c.allNodes { + if err := node.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + + c.allNodes = nil + c.clusterAddrs = nil + + return firstErr +} + +func (c *clusterNodes) Addrs() ([]string, error) { + var addrs []string + c.mu.RLock() + closed := c.closed + if !closed { + if len(c.clusterAddrs) > 0 { + addrs = c.clusterAddrs + } else { + addrs = c.allAddrs + } + } + c.mu.RUnlock() + + if closed { + return nil, pool.ErrClosed + } + if len(addrs) == 0 { + return nil, errClusterNoNodes + } + return addrs, nil +} + +func (c *clusterNodes) NextGeneration() uint32 { + return atomic.AddUint32(&c._generation, 1) +} + +// GC removes unused nodes. 
+func (c *clusterNodes) GC(generation uint32) { + var collected []*clusterNode + c.mu.Lock() + for addr, node := range c.allNodes { + if node.Generation() >= generation { + continue + } + + c.clusterAddrs = remove(c.clusterAddrs, addr) + delete(c.allNodes, addr) + collected = append(collected, node) + } + c.mu.Unlock() + + for _, node := range collected { + _ = node.Client.Close() + } +} + +func (c *clusterNodes) Get(addr string) (*clusterNode, error) { + var node *clusterNode + var err error + c.mu.RLock() + if c.closed { + err = pool.ErrClosed + } else { + node = c.allNodes[addr] + } + c.mu.RUnlock() + return node, err +} + +func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { + node, err := c.Get(addr) + if err != nil { + return nil, err + } + if node != nil { + return node, nil + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil, pool.ErrClosed + } + + node, ok := c.allNodes[addr] + if ok { + return node, err + } + + node = newClusterNode(c.opt, addr) + + c.allAddrs = appendIfNotExists(c.allAddrs, addr) + c.clusterAddrs = append(c.clusterAddrs, addr) + c.allNodes[addr] = node + + return node, err +} + +func (c *clusterNodes) All() ([]*clusterNode, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.closed { + return nil, pool.ErrClosed + } + + cp := make([]*clusterNode, 0, len(c.allNodes)) + for _, node := range c.allNodes { + cp = append(cp, node) + } + return cp, nil +} + +func (c *clusterNodes) Random() (*clusterNode, error) { + addrs, err := c.Addrs() + if err != nil { + return nil, err + } + + n := rand.Intn(len(addrs)) + return c.GetOrCreate(addrs[n]) +} + +//------------------------------------------------------------------------------ + +type clusterSlot struct { + start, end int + nodes []*clusterNode +} + +type clusterSlotSlice []*clusterSlot + +func (p clusterSlotSlice) Len() int { + return len(p) +} + +func (p clusterSlotSlice) Less(i, j int) bool { + return p[i].start < p[j].start +} + +func (p 
clusterSlotSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +type clusterState struct { + nodes *clusterNodes + Masters []*clusterNode + Slaves []*clusterNode + + slots []*clusterSlot + + generation uint32 + createdAt time.Time +} + +func newClusterState( + nodes *clusterNodes, slots []ClusterSlot, origin string, +) (*clusterState, error) { + c := clusterState{ + nodes: nodes, + + slots: make([]*clusterSlot, 0, len(slots)), + + generation: nodes.NextGeneration(), + createdAt: time.Now(), + } + + originHost, _, _ := net.SplitHostPort(origin) + isLoopbackOrigin := isLoopback(originHost) + + for _, slot := range slots { + var nodes []*clusterNode + for i, slotNode := range slot.Nodes { + addr := slotNode.Addr + if !isLoopbackOrigin { + addr = replaceLoopbackHost(addr, originHost) + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return nil, err + } + + node.SetGeneration(c.generation) + nodes = append(nodes, node) + + if i == 0 { + c.Masters = appendUniqueNode(c.Masters, node) + } else { + c.Slaves = appendUniqueNode(c.Slaves, node) + } + } + + c.slots = append(c.slots, &clusterSlot{ + start: slot.Start, + end: slot.End, + nodes: nodes, + }) + } + + sort.Sort(clusterSlotSlice(c.slots)) + + time.AfterFunc(time.Minute, func() { + nodes.GC(c.generation) + }) + + return &c, nil +} + +func replaceLoopbackHost(nodeAddr, originHost string) string { + nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) + if err != nil { + return nodeAddr + } + + nodeIP := net.ParseIP(nodeHost) + if nodeIP == nil { + return nodeAddr + } + + if !nodeIP.IsLoopback() { + return nodeAddr + } + + // Use origin host which is not loopback and node port. 
+ return net.JoinHostPort(originHost, nodePort) +} + +func isLoopback(host string) bool { + ip := net.ParseIP(host) + if ip == nil { + return true + } + return ip.IsLoopback() +} + +func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + switch len(nodes) { + case 0: + return c.nodes.Random() + case 1: + return nodes[0], nil + case 2: + if slave := nodes[1]; !slave.Loading() { + return slave, nil + } + return nodes[0], nil + default: + var slave *clusterNode + for i := 0; i < 10; i++ { + n := rand.Intn(len(nodes)-1) + 1 + slave = nodes[n] + if !slave.Loading() { + return slave, nil + } + } + + // All slaves are loading - use master. + return nodes[0], nil + } +} + +func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { + const threshold = time.Millisecond + + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + + var node *clusterNode + for _, n := range nodes { + if n.Loading() { + continue + } + if node == nil || node.Latency()-n.Latency() > threshold { + node = n + } + } + return node, nil +} + +func (c *clusterState) slotRandomNode(slot int) *clusterNode { + nodes := c.slotNodes(slot) + n := rand.Intn(len(nodes)) + return nodes[n] +} + +func (c *clusterState) slotNodes(slot int) []*clusterNode { + i := sort.Search(len(c.slots), func(i int) bool { + return c.slots[i].end >= slot + }) + if i >= len(c.slots) { + return nil + } + x := c.slots[i] + if slot >= x.start && slot <= x.end { + return x.nodes + } + return nil +} + +//------------------------------------------------------------------------------ + +type clusterStateHolder struct { + load func() (*clusterState, error) + + state atomic.Value + reloading uint32 // atomic +} + +func newClusterStateHolder(fn func() (*clusterState, error)) 
*clusterStateHolder { + return &clusterStateHolder{ + load: fn, + } +} + +func (c *clusterStateHolder) Reload() (*clusterState, error) { + state, err := c.load() + if err != nil { + return nil, err + } + c.state.Store(state) + return state, nil +} + +func (c *clusterStateHolder) LazyReload() { + if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + return + } + go func() { + defer atomic.StoreUint32(&c.reloading, 0) + + _, err := c.Reload() + if err != nil { + return + } + time.Sleep(100 * time.Millisecond) + }() +} + +func (c *clusterStateHolder) Get() (*clusterState, error) { + v := c.state.Load() + if v != nil { + state := v.(*clusterState) + if time.Since(state.createdAt) > time.Minute { + c.LazyReload() + } + return state, nil + } + return c.Reload() +} + +func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) { + state, err := c.Reload() + if err == nil { + return state, nil + } + return c.Get() +} + +//------------------------------------------------------------------------------ + +// ClusterClient is a Redis Cluster client representing a pool of zero +// or more underlying connections. It's safe for concurrent use by +// multiple goroutines. +type ClusterClient struct { + cmdable + + ctx context.Context + + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder + cmdsInfoCache *cmdsInfoCache + + process func(Cmder) error + processPipeline func([]Cmder) error + processTxPipeline func([]Cmder) error +} + +// NewClusterClient returns a Redis Cluster client as described in +// http://redis.io/topics/cluster-spec. 
+func NewClusterClient(opt *ClusterOptions) *ClusterClient { + opt.init() + + c := &ClusterClient{ + opt: opt, + nodes: newClusterNodes(opt), + } + c.state = newClusterStateHolder(c.loadState) + c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) + + c.process = c.defaultProcess + c.processPipeline = c.defaultProcessPipeline + c.processTxPipeline = c.defaultProcessTxPipeline + + c.init() + if opt.IdleCheckFrequency > 0 { + go c.reaper(opt.IdleCheckFrequency) + } + + return c +} + +func (c *ClusterClient) init() { + c.cmdable.setProcessor(c.Process) +} + +// ReloadState reloads cluster state. If available it calls ClusterSlots func +// to get cluster slots information. +func (c *ClusterClient) ReloadState() error { + _, err := c.state.Reload() + return err +} + +func (c *ClusterClient) Context() context.Context { + if c.ctx != nil { + return c.ctx + } + return context.Background() +} + +func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { + if ctx == nil { + panic("nil context") + } + c2 := c.copy() + c2.ctx = ctx + return c2 +} + +func (c *ClusterClient) copy() *ClusterClient { + cp := *c + cp.init() + return &cp +} + +// Options returns read-only Options that were used to create the client. 
+func (c *ClusterClient) Options() *ClusterOptions { + return c.opt +} + +func (c *ClusterClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) { + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + for _, addr := range addrs { + node, err := c.nodes.Get(addr) + if err != nil { + return nil, err + } + if node == nil { + continue + } + + info, err := node.Client.Command().Result() + if err == nil { + return info, nil + } + if firstErr == nil { + firstErr = err + } + } + return nil, firstErr +} + +func (c *ClusterClient) cmdInfo(name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get() + if err != nil { + return nil + } + + info := cmdsInfo[name] + if info == nil { + internal.Logf("info for cmd=%s not found", name) + } + return info +} + +func cmdSlot(cmd Cmder, pos int) int { + if pos == 0 { + return hashtag.RandomSlot() + } + firstKey := cmd.stringArg(pos) + return hashtag.Slot(firstKey) +} + +func (c *ClusterClient) cmdSlot(cmd Cmder) int { + args := cmd.Args() + if args[0] == "cluster" && args[1] == "getkeysinslot" { + return args[2].(int) + } + + cmdInfo := c.cmdInfo(cmd.Name()) + return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) +} + +func (c *ClusterClient) cmdSlotAndNode(cmd Cmder) (int, *clusterNode, error) { + state, err := c.state.Get() + if err != nil { + return 0, nil, err + } + + cmdInfo := c.cmdInfo(cmd.Name()) + slot := c.cmdSlot(cmd) + + if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly { + if c.opt.RouteByLatency { + node, err := state.slotClosestNode(slot) + return slot, node, err + } + + if c.opt.RouteRandomly { + node := state.slotRandomNode(slot) + return slot, node, nil + } + + node, err := state.slotSlaveNode(slot) + return slot, node, err + } + + node, err := state.slotMasterNode(slot) + return slot, node, err +} + +func (c 
*ClusterClient) slotMasterNode(slot int) (*clusterNode, error) { + state, err := c.state.Get() + if err != nil { + return nil, err + } + + nodes := state.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + slot := hashtag.Slot(keys[0]) + for _, key := range keys[1:] { + if hashtag.Slot(key) != slot { + err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") + return err + } + } + + node, err := c.slotMasterNode(slot) + if err != nil { + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + err = node.Client.Watch(fn, keys...) + if err == nil { + break + } + if err != Nil { + c.state.LazyReload() + } + + moved, ask, addr := internal.IsMovedError(err) + if moved || ask { + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if err == pool.ErrClosed || internal.IsReadOnlyError(err) { + node, err = c.slotMasterNode(slot) + if err != nil { + return err + } + continue + } + + if internal.IsRetryableError(err, true) { + continue + } + + return err + } + + return err +} + +// Close closes the cluster client, releasing any open resources. +// +// It is rare to Close a ClusterClient, as the ClusterClient is meant +// to be long-lived and shared between many goroutines. +func (c *ClusterClient) Close() error { + return c.nodes.Close() +} + +// Do creates a Cmd from the args and processes the cmd. +func (c *ClusterClient) Do(args ...interface{}) *Cmd { + cmd := NewCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *ClusterClient) WrapProcess( + fn func(oldProcess func(Cmder) error) func(Cmder) error, +) { + c.process = fn(c.process) +} + +func (c *ClusterClient) Process(cmd Cmder) error { + return c.process(cmd) +} + +func (c *ClusterClient) defaultProcess(cmd Cmder) error { + var node *clusterNode + var ask bool + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + if node == nil { + var err error + _, node, err = c.cmdSlotAndNode(cmd) + if err != nil { + cmd.setErr(err) + break + } + } + + var err error + if ask { + pipe := node.Client.Pipeline() + _ = pipe.Process(NewCmd("ASKING")) + _ = pipe.Process(cmd) + _, err = pipe.Exec() + _ = pipe.Close() + ask = false + } else { + err = node.Client.Process(cmd) + } + + // If there is no error - we are done. + if err == nil { + break + } + if err != Nil { + c.state.LazyReload() + } + + // If slave is loading - pick another node. + if c.opt.ReadOnly && internal.IsLoadingError(err) { + node.MarkAsLoading() + node = nil + continue + } + + var moved bool + var addr string + moved, ask, addr = internal.IsMovedError(err) + if moved || ask { + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + break + } + continue + } + + if err == pool.ErrClosed || internal.IsReadOnlyError(err) { + node = nil + continue + } + + if internal.IsRetryableError(err, true) { + // First retry the same node. + if attempt == 0 { + continue + } + + // Second try random node. + node, err = c.nodes.Random() + if err != nil { + break + } + continue + } + + break + } + + return cmd.Err() +} + +// ForEachMaster concurrently calls the fn on each master node in the cluster. +// It returns the first error if any. 
+func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, master := range state.Masters { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(master) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachSlave concurrently calls the fn on each slave node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, slave := range state.Slaves { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(slave) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachNode concurrently calls the fn on each known node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + worker := func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + } + + for _, node := range state.Masters { + wg.Add(1) + go worker(node) + } + for _, node := range state.Slaves { + wg.Add(1) + go worker(node) + } + + wg.Wait() + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// PoolStats returns accumulated connection pool stats. 
+func (c *ClusterClient) PoolStats() *PoolStats { + var acc PoolStats + + state, _ := c.state.Get() + if state == nil { + return &acc + } + + for _, node := range state.Masters { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + for _, node := range state.Slaves { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + return &acc +} + +func (c *ClusterClient) loadState() (*clusterState, error) { + if c.opt.ClusterSlots != nil { + slots, err := c.opt.ClusterSlots() + if err != nil { + return nil, err + } + return newClusterState(c.nodes, slots, "") + } + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + for _, addr := range addrs { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + slots, err := node.Client.ClusterSlots().Result() + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + return newClusterState(c.nodes, slots, node.Client.opt.Addr) + } + + return nil, firstErr +} + +// reaper closes idle connections to the cluster. 
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { + ticker := time.NewTicker(idleCheckFrequency) + defer ticker.Stop() + + for range ticker.C { + nodes, err := c.nodes.All() + if err != nil { + break + } + + for _, node := range nodes { + _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() + if err != nil { + internal.Logf("ReapStaleConns failed: %s", err) + } + } + } +} + +func (c *ClusterClient) Pipeline() Pipeliner { + pipe := Pipeline{ + exec: c.processPipeline, + } + pipe.statefulCmdable.setProcessor(pipe.Process) + return &pipe +} + +func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(fn) +} + +func (c *ClusterClient) WrapProcessPipeline( + fn func(oldProcess func([]Cmder) error) func([]Cmder) error, +) { + c.processPipeline = fn(c.processPipeline) +} + +func (c *ClusterClient) defaultProcessPipeline(cmds []Cmder) error { + cmdsMap := newCmdsMap() + err := c.mapCmdsByNode(cmds, cmdsMap) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap.m { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + cn, err := node.Client.getConn() + if err != nil { + if err == pool.ErrClosed { + c.mapCmdsByNode(cmds, failedCmds) + } else { + setCmdsErr(cmds, err) + } + return + } + + err = c.pipelineProcessCmds(node, cn, cmds, failedCmds) + node.Client.releaseConnStrict(cn, err) + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds + } + + return cmdsFirstErr(cmds) +} + +type cmdsMap struct { + mu sync.Mutex + m map[*clusterNode][]Cmder +} + +func newCmdsMap() *cmdsMap { + return &cmdsMap{ + m: make(map[*clusterNode][]Cmder), + } +} + +func (c *ClusterClient) mapCmdsByNode(cmds []Cmder, cmdsMap 
*cmdsMap) error { + state, err := c.state.Get() + if err != nil { + setCmdsErr(cmds, err) + return err + } + + cmdsAreReadOnly := c.cmdsAreReadOnly(cmds) + for _, cmd := range cmds { + var node *clusterNode + var err error + if cmdsAreReadOnly { + _, node, err = c.cmdSlotAndNode(cmd) + } else { + slot := c.cmdSlot(cmd) + node, err = state.slotMasterNode(slot) + } + if err != nil { + return err + } + cmdsMap.mu.Lock() + cmdsMap.m[node] = append(cmdsMap.m[node], cmd) + cmdsMap.mu.Unlock() + } + return nil +} + +func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(cmd.Name()) + if cmdInfo == nil || !cmdInfo.ReadOnly { + return false + } + } + return true +} + +func (c *ClusterClient) pipelineProcessCmds( + node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, +) error { + err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmd(wr, cmds...) + }) + if err != nil { + setCmdsErr(cmds, err) + failedCmds.mu.Lock() + failedCmds.m[node] = cmds + failedCmds.mu.Unlock() + return err + } + + err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error { + return c.pipelineReadCmds(node, rd, cmds, failedCmds) + }) + return err +} + +func (c *ClusterClient) pipelineReadCmds( + node *clusterNode, rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap, +) error { + var firstErr error + for _, cmd := range cmds { + err := cmd.readReply(rd) + if err == nil { + continue + } + + if c.checkMovedErr(cmd, err, failedCmds) { + continue + } + + if internal.IsRedisError(err) { + continue + } + + failedCmds.mu.Lock() + failedCmds.m[node] = append(failedCmds.m[node], cmd) + failedCmds.mu.Unlock() + if firstErr == nil { + firstErr = err + } + } + return firstErr +} + +func (c *ClusterClient) checkMovedErr( + cmd Cmder, err error, failedCmds *cmdsMap, +) bool { + moved, ask, addr := internal.IsMovedError(err) + + if moved { + c.state.LazyReload() + + node, err := 
c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + failedCmds.mu.Lock() + failedCmds.m[node] = append(failedCmds.m[node], cmd) + failedCmds.mu.Unlock() + return true + } + + if ask { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + failedCmds.mu.Lock() + failedCmds.m[node] = append(failedCmds.m[node], NewCmd("ASKING"), cmd) + failedCmds.mu.Unlock() + return true + } + + return false +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *ClusterClient) TxPipeline() Pipeliner { + pipe := Pipeline{ + exec: c.processTxPipeline, + } + pipe.statefulCmdable.setProcessor(pipe.Process) + return &pipe +} + +func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(fn) +} + +func (c *ClusterClient) defaultProcessTxPipeline(cmds []Cmder) error { + state, err := c.state.Get() + if err != nil { + return err + } + + cmdsMap := c.mapCmdsBySlot(cmds) + for slot, cmds := range cmdsMap { + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + continue + } + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + cn, err := node.Client.getConn() + if err != nil { + if err == pool.ErrClosed { + c.mapCmdsByNode(cmds, failedCmds) + } else { + setCmdsErr(cmds, err) + } + return + } + + err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds) + node.Client.releaseConnStrict(cn, err) + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds.m + } + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { + cmdsMap := make(map[int][]Cmder) + for 
_, cmd := range cmds { + slot := c.cmdSlot(cmd) + cmdsMap[slot] = append(cmdsMap[slot], cmd) + } + return cmdsMap +} + +func (c *ClusterClient) txPipelineProcessCmds( + node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, +) error { + err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error { + return txPipelineWriteMulti(wr, cmds) + }) + if err != nil { + setCmdsErr(cmds, err) + failedCmds.mu.Lock() + failedCmds.m[node] = cmds + failedCmds.mu.Unlock() + return err + } + + err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error { + err := c.txPipelineReadQueued(rd, cmds, failedCmds) + if err != nil { + setCmdsErr(cmds, err) + return err + } + return pipelineReadCmds(rd, cmds) + }) + return err +} + +func (c *ClusterClient) txPipelineReadQueued( + rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap, +) error { + // Parse queued replies. + var statusCmd StatusCmd + if err := statusCmd.readReply(rd); err != nil { + return err + } + + for _, cmd := range cmds { + err := statusCmd.readReply(rd) + if err == nil { + continue + } + + if c.checkMovedErr(cmd, err, failedCmds) || internal.IsRedisError(err) { + continue + } + + return err + } + + // Parse number of replies. 
+ line, err := rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + switch line[0] { + case proto.ErrorReply: + err := proto.ParseErrorReply(line) + for _, cmd := range cmds { + if !c.checkMovedErr(cmd, err, failedCmds) { + break + } + } + return err + case proto.ArrayReply: + // ok + default: + err := fmt.Errorf("redis: expected '*', but got line %q", line) + return err + } + + return nil +} + +func (c *ClusterClient) pubSub() *PubSub { + var node *clusterNode + pubsub := &PubSub{ + opt: c.opt.clientOptions(), + + newConn: func(channels []string) (*pool.Conn, error) { + if node != nil { + panic("node != nil") + } + + slot := hashtag.Slot(channels[0]) + + var err error + node, err = c.slotMasterNode(slot) + if err != nil { + return nil, err + } + + cn, err := node.Client.newConn() + if err != nil { + return nil, err + } + + return cn, nil + }, + closeConn: func(cn *pool.Conn) error { + err := node.Client.connPool.CloseConn(cn) + node = nil + return err + }, + } + pubsub.init() + + return pubsub +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. +func (c *ClusterClient) Subscribe(channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.Subscribe(channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *ClusterClient) PSubscribe(channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.PSubscribe(channels...) 
+ } + return pubsub +} + +func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { + for _, n := range nodes { + if n == node { + return nodes + } + } + return append(nodes, node) +} + +func appendIfNotExists(ss []string, es ...string) []string { +loop: + for _, e := range es { + for _, s := range ss { + if s == e { + continue loop + } + } + ss = append(ss, e) + } + return ss +} + +func remove(ss []string, es ...string) []string { + if len(es) == 0 { + return ss[:0] + } + for _, e := range es { + for i, s := range ss { + if s == e { + ss = append(ss[:i], ss[i+1:]...) + break + } + } + } + return ss +} diff --git a/vendor/github.com/go-redis/redis/cluster_commands.go b/vendor/github.com/go-redis/redis/cluster_commands.go new file mode 100644 index 000000000..dff62c902 --- /dev/null +++ b/vendor/github.com/go-redis/redis/cluster_commands.go @@ -0,0 +1,22 @@ +package redis + +import "sync/atomic" + +func (c *ClusterClient) DBSize() *IntCmd { + cmd := NewIntCmd("dbsize") + var size int64 + err := c.ForEachMaster(func(master *Client) error { + n, err := master.DBSize().Result() + if err != nil { + return err + } + atomic.AddInt64(&size, n) + return nil + }) + if err != nil { + cmd.setErr(err) + return cmd + } + cmd.val = size + return cmd +} diff --git a/vendor/github.com/go-redis/redis/command.go b/vendor/github.com/go-redis/redis/command.go new file mode 100644 index 000000000..cb4f94b12 --- /dev/null +++ b/vendor/github.com/go-redis/redis/command.go @@ -0,0 +1,1936 @@ +package redis + +import ( + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/go-redis/redis/internal" + "github.com/go-redis/redis/internal/proto" +) + +type Cmder interface { + Name() string + Args() []interface{} + stringArg(int) string + + readReply(rd *proto.Reader) error + setErr(error) + + readTimeout() *time.Duration + + Err() error +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + if cmd.Err() == nil { + cmd.setErr(e) + } + } +} + 
+func cmdsFirstErr(cmds []Cmder) error { + for _, cmd := range cmds { + if err := cmd.Err(); err != nil { + return err + } + } + return nil +} + +func writeCmd(wr *proto.Writer, cmds ...Cmder) error { + for _, cmd := range cmds { + err := wr.WriteArgs(cmd.Args()) + if err != nil { + return err + } + } + return nil +} + +func cmdString(cmd Cmder, val interface{}) string { + var ss []string + for _, arg := range cmd.Args() { + ss = append(ss, fmt.Sprint(arg)) + } + s := strings.Join(ss, " ") + if err := cmd.Err(); err != nil { + return s + ": " + err.Error() + } + if val != nil { + switch vv := val.(type) { + case []byte: + return s + ": " + string(vv) + default: + return s + ": " + fmt.Sprint(val) + } + } + return s + +} + +func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { + switch cmd.Name() { + case "eval", "evalsha": + if cmd.stringArg(2) != "0" { + return 3 + } + + return 0 + case "publish": + return 1 + } + if info == nil { + return 0 + } + return int(info.FirstKeyPos) +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + _args []interface{} + err error + + _readTimeout *time.Duration +} + +var _ Cmder = (*Cmd)(nil) + +func (cmd *baseCmd) Err() error { + return cmd.err +} + +func (cmd *baseCmd) Args() []interface{} { + return cmd._args +} + +func (cmd *baseCmd) stringArg(pos int) string { + if pos < 0 || pos >= len(cmd._args) { + return "" + } + s, _ := cmd._args[pos].(string) + return s +} + +func (cmd *baseCmd) Name() string { + if len(cmd._args) > 0 { + // Cmd name must be lower cased. 
+ s := internal.ToLower(cmd.stringArg(0)) + cmd._args[0] = s + return s + } + return "" +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +func (cmd *baseCmd) setErr(e error) { + cmd.err = e +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(args ...interface{}) *Cmd { + return &Cmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) String() (string, error) { + if cmd.err != nil { + return "", cmd.err + } + switch val := cmd.val.(type) { + case string: + return val, nil + default: + err := fmt.Errorf("redis: unexpected type=%T for String", val) + return "", err + } +} + +func (cmd *Cmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return int(val), nil + case string: + return strconv.Atoi(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int", val) + return 0, err + } +} + +func (cmd *Cmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return val, nil + case string: + return strconv.ParseInt(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int64", val) + return 0, err + } +} + +func (cmd *Cmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return uint64(val), nil + case string: + return strconv.ParseUint(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) + return 0, err + } +} + +func (cmd *Cmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + 
case int64: + return float64(val), nil + case string: + return strconv.ParseFloat(val, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Float64", val) + return 0, err + } +} + +func (cmd *Cmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return val != 0, nil + case string: + return strconv.ParseBool(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Bool", val) + return false, err + } +} + +func (cmd *Cmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadReply(sliceParser) + return cmd.err +} + +// Implements proto.MultiBulkParse +func sliceParser(rd *proto.Reader, n int64) (interface{}, error) { + vals := make([]interface{}, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(sliceParser) + if err != nil { + if err == Nil { + vals = append(vals, nil) + continue + } + if err, ok := err.(proto.RedisError); ok { + vals = append(vals, err) + continue + } + return nil, err + } + + switch v := v.(type) { + case string: + vals = append(vals, v) + default: + vals = append(vals, v) + } + } + return vals, nil +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +var _ Cmder = (*SliceCmd)(nil) + +func NewSliceCmd(args ...interface{}) *SliceCmd { + return &SliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(sliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd 
struct { + baseCmd + + val string +} + +var _ Cmder = (*StatusCmd)(nil) + +func NewStatusCmd(args ...interface{}) *StatusCmd { + return &StatusCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadString() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +var _ Cmder = (*IntCmd)(nil) + +func NewIntCmd(args ...interface{}) *IntCmd { + return &IntCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadIntReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +var _ Cmder = (*DurationCmd)(nil) + +func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + baseCmd: baseCmd{_args: args}, + precision: precision, + } +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) readReply(rd *proto.Reader) error { + var n int64 + n, cmd.err = rd.ReadIntReply() + if cmd.err != nil { + return cmd.err + } + cmd.val = time.Duration(n) * cmd.precision + return nil +} + 
+//------------------------------------------------------------------------------ + +type TimeCmd struct { + baseCmd + + val time.Time +} + +var _ Cmder = (*TimeCmd)(nil) + +func NewTimeCmd(args ...interface{}) *TimeCmd { + return &TimeCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *TimeCmd) Val() time.Time { + return cmd.val +} + +func (cmd *TimeCmd) Result() (time.Time, error) { + return cmd.val, cmd.err +} + +func (cmd *TimeCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TimeCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(timeParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(time.Time) + return nil +} + +// Implements proto.MultiBulkParse +func timeParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d elements, expected 2", n) + } + + sec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + microsec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + return time.Unix(sec, microsec*1000), nil +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +var _ Cmder = (*BoolCmd)(nil) + +func NewBoolCmd(args ...interface{}) *BoolCmd { + return &BoolCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadReply(nil) + // `SET key value NX` returns nil when key already exists. But + // `SETNX key value` returns bool (0/1). So convert nil to bool. + // TODO: is this okay? 
+ if cmd.err == Nil { + cmd.val = false + cmd.err = nil + return nil + } + if cmd.err != nil { + return cmd.err + } + switch v := v.(type) { + case int64: + cmd.val = v == 1 + return nil + case string: + cmd.val = v == "OK" + return nil + default: + cmd.err = fmt.Errorf("got %T, wanted int64 or string", v) + return cmd.err + } +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StringCmd)(nil) + +func NewStringCmd(args ...interface{}) *StringCmd { + return &StringCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringCmd) Val() string { + return cmd.val +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.Val(), cmd.err +} + +func (cmd *StringCmd) Bytes() ([]byte, error) { + return []byte(cmd.val), cmd.err +} + +func (cmd *StringCmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.Atoi(cmd.Val()) +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.Val(), 64) +} + +func (cmd *StringCmd) Scan(val interface{}) error { + if cmd.err != nil { + return cmd.err + } + return proto.Scan([]byte(cmd.val), val) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadString() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + baseCmd + + val float64 +} + +var _ Cmder = (*FloatCmd)(nil) + +func NewFloatCmd(args ...interface{}) *FloatCmd { + return 
&FloatCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) Result() (float64, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadFloatReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + baseCmd + + val []string +} + +var _ Cmder = (*StringSliceCmd)(nil) + +func NewStringSliceCmd(args ...interface{}) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { + return proto.ScanSlice(cmd.Val(), container) +} + +func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]string) + return nil +} + +// Implements proto.MultiBulkParse +func stringSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + ss := make([]string, 0, n) + for i := int64(0); i < n; i++ { + s, err := rd.ReadString() + if err == Nil { + ss = append(ss, "") + } else if err != nil { + return nil, err + } else { + ss = append(ss, s) + } + } + return ss, nil +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + baseCmd + + val []bool +} + +var _ Cmder = (*BoolSliceCmd)(nil) + +func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *BoolSliceCmd) Val() []bool { + 
return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(boolSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]bool) + return nil +} + +// Implements proto.MultiBulkParse +func boolSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + bools := make([]bool, 0, n) + for i := int64(0); i < n; i++ { + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + bools = append(bools, n == 1) + } + return bools, nil +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + baseCmd + + val map[string]string +} + +var _ Cmder = (*StringStringMapCmd)(nil) + +func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd { + return &StringStringMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStringMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringStringMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]string) + return nil +} + +// Implements proto.MultiBulkParse +func stringStringMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]string, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + 
+//------------------------------------------------------------------------------ + +type StringIntMapCmd struct { + baseCmd + + val map[string]int64 +} + +var _ Cmder = (*StringIntMapCmd)(nil) + +func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd { + return &StringIntMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringIntMapCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *StringIntMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringIntMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]int64) + return nil +} + +// Implements proto.MultiBulkParse +func stringIntMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]int64, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + m[key] = n + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type StringStructMapCmd struct { + baseCmd + + val map[string]struct{} +} + +var _ Cmder = (*StringStructMapCmd)(nil) + +func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd { + return &StringStructMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringStructMapCmd) Val() map[string]struct{} { + return cmd.val +} + +func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStructMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringStructMapParser) + if cmd.err != nil { + return cmd.err + } + 
cmd.val = v.(map[string]struct{}) + return nil +} + +// Implements proto.MultiBulkParse +func stringStructMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]struct{}, n) + for i := int64(0); i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = struct{}{} + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XMessage struct { + ID string + Values map[string]interface{} +} + +type XMessageSliceCmd struct { + baseCmd + + val []XMessage +} + +var _ Cmder = (*XMessageSliceCmd)(nil) + +func NewXMessageSliceCmd(args ...interface{}) *XMessageSliceCmd { + return &XMessageSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XMessageSliceCmd) Val() []XMessage { + return cmd.val +} + +func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { + return cmd.val, cmd.err +} + +func (cmd *XMessageSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(xMessageSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]XMessage) + return nil +} + +// Implements proto.MultiBulkParse +func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + msgs := make([]XMessage, 0, n) + for i := int64(0); i < n; i++ { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + id, err := rd.ReadString() + if err != nil { + return nil, err + } + + v, err := rd.ReadArrayReply(stringInterfaceMapParser) + if err != nil { + return nil, err + } + + msgs = append(msgs, XMessage{ + ID: id, + Values: v.(map[string]interface{}), + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return msgs, nil +} + +// Implements proto.MultiBulkParse +func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]interface{}, 
n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XStream struct { + Stream string + Messages []XMessage +} + +type XStreamSliceCmd struct { + baseCmd + + val []XStream +} + +var _ Cmder = (*XStreamSliceCmd)(nil) + +func NewXStreamSliceCmd(args ...interface{}) *XStreamSliceCmd { + return &XStreamSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XStreamSliceCmd) Val() []XStream { + return cmd.val +} + +func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XStreamSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(xStreamSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]XStream) + return nil +} + +// Implements proto.MultiBulkParse +func xStreamSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + ret := make([]XStream, 0, n) + for i := int64(0); i < n; i++ { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + stream, err := rd.ReadString() + if err != nil { + return nil, err + } + + v, err := rd.ReadArrayReply(xMessageSliceParser) + if err != nil { + return nil, err + } + + ret = append(ret, XStream{ + Stream: stream, + Messages: v.([]XMessage), + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return ret, nil +} + +//------------------------------------------------------------------------------ + +type XPending struct { + Count int64 + Lower string + Higher string + Consumers map[string]int64 +} + +type XPendingCmd struct { + baseCmd + val *XPending +} + 
+var _ Cmder = (*XPendingCmd)(nil) + +func NewXPendingCmd(args ...interface{}) *XPendingCmd { + return &XPendingCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XPendingCmd) Val() *XPending { + return cmd.val +} + +func (cmd *XPendingCmd) Result() (*XPending, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { + var info interface{} + info, cmd.err = rd.ReadArrayReply(xPendingParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = info.(*XPending) + return nil +} + +func xPendingParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + count, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + lower, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + higher, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + pending := &XPending{ + Count: count, + Lower: lower, + Higher: higher, + } + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + for i := int64(0); i < n; i++ { + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + consumerName, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumerPending, err := rd.ReadInt() + if err != nil { + return nil, err + } + + if pending.Consumers == nil { + pending.Consumers = make(map[string]int64) + } + pending.Consumers[consumerName] = consumerPending + + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + if err != nil && err != Nil { + return nil, err + } + + return pending, nil +} + +//------------------------------------------------------------------------------ + +type XPendingExt struct { + Id string + Consumer string + Idle time.Duration + 
RetryCount int64 +} + +type XPendingExtCmd struct { + baseCmd + val []XPendingExt +} + +var _ Cmder = (*XPendingExtCmd)(nil) + +func NewXPendingExtCmd(args ...interface{}) *XPendingExtCmd { + return &XPendingExtCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XPendingExtCmd) Val() []XPendingExt { + return cmd.val +} + +func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingExtCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { + var info interface{} + info, cmd.err = rd.ReadArrayReply(xPendingExtSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = info.([]XPendingExt) + return nil +} + +func xPendingExtSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + ret := make([]XPendingExt, 0, n) + for i := int64(0); i < n; i++ { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + id, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumer, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + idle, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + retryCount, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + ret = append(ret, XPendingExt{ + Id: id, + Consumer: consumer, + Idle: time.Duration(idle) * time.Millisecond, + RetryCount: retryCount, + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return ret, nil +} + +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +var _ Cmder = (*ZSliceCmd)(nil) + +func NewZSliceCmd(args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + 
+func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(zSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]Z) + return nil +} + +// Implements proto.MultiBulkParse +func zSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + zz := make([]Z, n/2) + for i := int64(0); i < n; i += 2 { + var err error + + z := &zz[i/2] + + z.Member, err = rd.ReadString() + if err != nil { + return nil, err + } + + z.Score, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + return zz, nil +} + +//------------------------------------------------------------------------------ + +type ZWithKeyCmd struct { + baseCmd + + val ZWithKey +} + +var _ Cmder = (*ZWithKeyCmd)(nil) + +func NewZWithKeyCmd(args ...interface{}) *ZWithKeyCmd { + return &ZWithKeyCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *ZWithKeyCmd) Val() ZWithKey { + return cmd.val +} + +func (cmd *ZWithKeyCmd) Result() (ZWithKey, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ZWithKeyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(zWithKeyParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(ZWithKey) + return nil +} + +// Implements proto.MultiBulkParse +func zWithKeyParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 3 { + return nil, fmt.Errorf("got %d elements, expected 3", n) + } + + var z ZWithKey + var err error + + z.Key, err = rd.ReadString() + if err != nil { + return nil, err + } + z.Member, err = rd.ReadString() + if err != nil { + return nil, err + } + z.Score, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + 
return z, nil +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + baseCmd + + page []string + cursor uint64 + + process func(cmd Cmder) error +} + +var _ Cmder = (*ScanCmd)(nil) + +func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd { + return &ScanCmd{ + baseCmd: baseCmd{_args: args}, + process: process, + } +} + +func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { + return cmd.page, cmd.cursor +} + +func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { + return cmd.page, cmd.cursor, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.page) +} + +func (cmd *ScanCmd) readReply(rd *proto.Reader) error { + cmd.page, cmd.cursor, cmd.err = rd.ReadScanReply() + return cmd.err +} + +// Iterator creates a new ScanIterator. +func (cmd *ScanCmd) Iterator() *ScanIterator { + return &ScanIterator{ + cmd: cmd, + } +} + +//------------------------------------------------------------------------------ + +type ClusterNode struct { + Id string + Addr string +} + +type ClusterSlot struct { + Start int + End int + Nodes []ClusterNode +} + +type ClusterSlotsCmd struct { + baseCmd + + val []ClusterSlot +} + +var _ Cmder = (*ClusterSlotsCmd)(nil) + +func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd { + return &ClusterSlotsCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { + return cmd.val +} + +func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterSlotsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(clusterSlotsParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]ClusterSlot) + return nil +} + +// Implements proto.MultiBulkParse +func clusterSlotsParser(rd *proto.Reader, n int64) 
(interface{}, error) { + slots := make([]ClusterSlot, n) + for i := 0; i < len(slots); i++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n < 2 { + err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) + return nil, err + } + + start, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + end, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + nodes := make([]ClusterNode, n-2) + for j := 0; j < len(nodes); j++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 && n != 3 { + err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n) + return nil, err + } + + ip, err := rd.ReadString() + if err != nil { + return nil, err + } + + port, err := rd.ReadString() + if err != nil { + return nil, err + } + + nodes[j].Addr = net.JoinHostPort(ip, port) + + if n == 3 { + id, err := rd.ReadString() + if err != nil { + return nil, err + } + nodes[j].Id = id + } + } + + slots[i] = ClusterSlot{ + Start: int(start), + End: int(end), + Nodes: nodes, + } + } + return slots, nil +} + +//------------------------------------------------------------------------------ + +// GeoLocation is used with GeoAdd to add geospatial location. +type GeoLocation struct { + Name string + Longitude, Latitude, Dist float64 + GeoHash int64 +} + +// GeoRadiusQuery is used with GeoRadius to query geospatial index. +type GeoRadiusQuery struct { + Radius float64 + // Can be m, km, ft, or mi. Default is km. + Unit string + WithCoord bool + WithDist bool + WithGeoHash bool + Count int + // Can be ASC or DESC. Default is no sort order. 
+ Sort string + Store string + StoreDist string +} + +type GeoLocationCmd struct { + baseCmd + + q *GeoRadiusQuery + locations []GeoLocation +} + +var _ Cmder = (*GeoLocationCmd)(nil) + +func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { + args = append(args, q.Radius) + if q.Unit != "" { + args = append(args, q.Unit) + } else { + args = append(args, "km") + } + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithGeoHash { + args = append(args, "withhash") + } + if q.Count > 0 { + args = append(args, "count", q.Count) + } + if q.Sort != "" { + args = append(args, q.Sort) + } + if q.Store != "" { + args = append(args, "store") + args = append(args, q.Store) + } + if q.StoreDist != "" { + args = append(args, "storedist") + args = append(args, q.StoreDist) + } + return &GeoLocationCmd{ + baseCmd: baseCmd{_args: args}, + q: q, + } +} + +func (cmd *GeoLocationCmd) Val() []GeoLocation { + return cmd.locations +} + +func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { + return cmd.locations, cmd.err +} + +func (cmd *GeoLocationCmd) String() string { + return cmdString(cmd, cmd.locations) +} + +func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q)) + if cmd.err != nil { + return cmd.err + } + cmd.locations = v.([]GeoLocation) + return nil +} + +func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + var loc GeoLocation + var err error + + loc.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + if q.WithDist { + loc.Dist, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + if q.WithGeoHash { + loc.GeoHash, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + } + if q.WithCoord { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } 
+ if n != 2 { + return nil, fmt.Errorf("got %d coordinates, expected 2", n) + } + + loc.Longitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + loc.Latitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + + return &loc, nil + } +} + +func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + locs := make([]GeoLocation, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(newGeoLocationParser(q)) + if err != nil { + return nil, err + } + switch vv := v.(type) { + case string: + locs = append(locs, GeoLocation{ + Name: vv, + }) + case *GeoLocation: + locs = append(locs, *vv) + default: + return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v) + } + } + return locs, nil + } +} + +//------------------------------------------------------------------------------ + +type GeoPos struct { + Longitude, Latitude float64 +} + +type GeoPosCmd struct { + baseCmd + + positions []*GeoPos +} + +var _ Cmder = (*GeoPosCmd)(nil) + +func NewGeoPosCmd(args ...interface{}) *GeoPosCmd { + return &GeoPosCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *GeoPosCmd) Val() []*GeoPos { + return cmd.positions +} + +func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *GeoPosCmd) String() string { + return cmdString(cmd, cmd.positions) +} + +func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(geoPosSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.positions = v.([]*GeoPos) + return nil +} + +func geoPosSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + positions := make([]*GeoPos, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(geoPosParser) + if err != nil { + if err == Nil { + positions = append(positions, nil) + continue + } + return nil, err + } + switch v := v.(type) { + case *GeoPos: + 
positions = append(positions, v) + default: + return nil, fmt.Errorf("got %T, expected *GeoPos", v) + } + } + return positions, nil +} + +func geoPosParser(rd *proto.Reader, n int64) (interface{}, error) { + var pos GeoPos + var err error + + pos.Longitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + + pos.Latitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + + return &pos, nil +} + +//------------------------------------------------------------------------------ + +type CommandInfo struct { + Name string + Arity int8 + Flags []string + FirstKeyPos int8 + LastKeyPos int8 + StepCount int8 + ReadOnly bool +} + +type CommandsInfoCmd struct { + baseCmd + + val map[string]*CommandInfo +} + +var _ Cmder = (*CommandsInfoCmd)(nil) + +func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd { + return &CommandsInfoCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { + return cmd.val +} + +func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *CommandsInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(commandInfoSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]*CommandInfo) + return nil +} + +// Implements proto.MultiBulkParse +func commandInfoSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]*CommandInfo, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(commandInfoParser) + if err != nil { + return nil, err + } + vv := v.(*CommandInfo) + m[vv.Name] = vv + + } + return m, nil +} + +func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) { + var cmd CommandInfo + var err error + + if n != 6 { + return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6", n) + } + + 
cmd.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + + arity, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.Arity = int8(arity) + + flags, err := rd.ReadReply(stringSliceParser) + if err != nil { + return nil, err + } + cmd.Flags = flags.([]string) + + firstKeyPos, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.FirstKeyPos = int8(firstKeyPos) + + lastKeyPos, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.LastKeyPos = int8(lastKeyPos) + + stepCount, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.StepCount = int8(stepCount) + + for _, flag := range cmd.Flags { + if flag == "readonly" { + cmd.ReadOnly = true + break + } + } + + return &cmd, nil +} + +//------------------------------------------------------------------------------ + +type cmdsInfoCache struct { + fn func() (map[string]*CommandInfo, error) + + once internal.Once + cmds map[string]*CommandInfo +} + +func newCmdsInfoCache(fn func() (map[string]*CommandInfo, error)) *cmdsInfoCache { + return &cmdsInfoCache{ + fn: fn, + } +} + +func (c *cmdsInfoCache) Get() (map[string]*CommandInfo, error) { + err := c.once.Do(func() error { + cmds, err := c.fn() + if err != nil { + return err + } + c.cmds = cmds + return nil + }) + return c.cmds, err +} diff --git a/vendor/github.com/go-redis/redis/commands.go b/vendor/github.com/go-redis/redis/commands.go new file mode 100644 index 000000000..653e4abe9 --- /dev/null +++ b/vendor/github.com/go-redis/redis/commands.go @@ -0,0 +1,2583 @@ +package redis + +import ( + "errors" + "io" + "time" + + "github.com/go-redis/redis/internal" +) + +func usePrecise(dur time.Duration) bool { + return dur < time.Second || dur%time.Second != 0 +} + +func formatMs(dur time.Duration) int64 { + if dur > 0 && dur < time.Millisecond { + internal.Logf( + "specified duration is %s, but minimal supported value is %s", + dur, time.Millisecond, + ) + } + return int64(dur / 
time.Millisecond) +} + +func formatSec(dur time.Duration) int64 { + if dur > 0 && dur < time.Second { + internal.Logf( + "specified duration is %s, but minimal supported value is %s", + dur, time.Second, + ) + } + return int64(dur / time.Second) +} + +func appendArgs(dst, src []interface{}) []interface{} { + if len(src) == 1 { + if ss, ok := src[0].([]string); ok { + for _, s := range ss { + dst = append(dst, s) + } + return dst + } + } + + for _, v := range src { + dst = append(dst, v) + } + return dst +} + +type Cmdable interface { + Pipeline() Pipeliner + Pipelined(fn func(Pipeliner) error) ([]Cmder, error) + + TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) + TxPipeline() Pipeliner + + Command() *CommandsInfoCmd + ClientGetName() *StringCmd + Echo(message interface{}) *StringCmd + Ping() *StatusCmd + Quit() *StatusCmd + Del(keys ...string) *IntCmd + Unlink(keys ...string) *IntCmd + Dump(key string) *StringCmd + Exists(keys ...string) *IntCmd + Expire(key string, expiration time.Duration) *BoolCmd + ExpireAt(key string, tm time.Time) *BoolCmd + Keys(pattern string) *StringSliceCmd + Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd + Move(key string, db int64) *BoolCmd + ObjectRefCount(key string) *IntCmd + ObjectEncoding(key string) *StringCmd + ObjectIdleTime(key string) *DurationCmd + Persist(key string) *BoolCmd + PExpire(key string, expiration time.Duration) *BoolCmd + PExpireAt(key string, tm time.Time) *BoolCmd + PTTL(key string) *DurationCmd + RandomKey() *StringCmd + Rename(key, newkey string) *StatusCmd + RenameNX(key, newkey string) *BoolCmd + Restore(key string, ttl time.Duration, value string) *StatusCmd + RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd + Sort(key string, sort *Sort) *StringSliceCmd + SortStore(key, store string, sort *Sort) *IntCmd + SortInterfaces(key string, sort *Sort) *SliceCmd + Touch(keys ...string) *IntCmd + TTL(key string) *DurationCmd + Type(key string) *StatusCmd + 
Scan(cursor uint64, match string, count int64) *ScanCmd + SScan(key string, cursor uint64, match string, count int64) *ScanCmd + HScan(key string, cursor uint64, match string, count int64) *ScanCmd + ZScan(key string, cursor uint64, match string, count int64) *ScanCmd + Append(key, value string) *IntCmd + BitCount(key string, bitCount *BitCount) *IntCmd + BitOpAnd(destKey string, keys ...string) *IntCmd + BitOpOr(destKey string, keys ...string) *IntCmd + BitOpXor(destKey string, keys ...string) *IntCmd + BitOpNot(destKey string, key string) *IntCmd + BitPos(key string, bit int64, pos ...int64) *IntCmd + Decr(key string) *IntCmd + DecrBy(key string, decrement int64) *IntCmd + Get(key string) *StringCmd + GetBit(key string, offset int64) *IntCmd + GetRange(key string, start, end int64) *StringCmd + GetSet(key string, value interface{}) *StringCmd + Incr(key string) *IntCmd + IncrBy(key string, value int64) *IntCmd + IncrByFloat(key string, value float64) *FloatCmd + MGet(keys ...string) *SliceCmd + MSet(pairs ...interface{}) *StatusCmd + MSetNX(pairs ...interface{}) *BoolCmd + Set(key string, value interface{}, expiration time.Duration) *StatusCmd + SetBit(key string, offset int64, value int) *IntCmd + SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd + SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd + SetRange(key string, offset int64, value string) *IntCmd + StrLen(key string) *IntCmd + HDel(key string, fields ...string) *IntCmd + HExists(key, field string) *BoolCmd + HGet(key, field string) *StringCmd + HGetAll(key string) *StringStringMapCmd + HIncrBy(key, field string, incr int64) *IntCmd + HIncrByFloat(key, field string, incr float64) *FloatCmd + HKeys(key string) *StringSliceCmd + HLen(key string) *IntCmd + HMGet(key string, fields ...string) *SliceCmd + HMSet(key string, fields map[string]interface{}) *StatusCmd + HSet(key, field string, value interface{}) *BoolCmd + HSetNX(key, field string, value interface{}) 
*BoolCmd + HVals(key string) *StringSliceCmd + BLPop(timeout time.Duration, keys ...string) *StringSliceCmd + BRPop(timeout time.Duration, keys ...string) *StringSliceCmd + BRPopLPush(source, destination string, timeout time.Duration) *StringCmd + LIndex(key string, index int64) *StringCmd + LInsert(key, op string, pivot, value interface{}) *IntCmd + LInsertBefore(key string, pivot, value interface{}) *IntCmd + LInsertAfter(key string, pivot, value interface{}) *IntCmd + LLen(key string) *IntCmd + LPop(key string) *StringCmd + LPush(key string, values ...interface{}) *IntCmd + LPushX(key string, value interface{}) *IntCmd + LRange(key string, start, stop int64) *StringSliceCmd + LRem(key string, count int64, value interface{}) *IntCmd + LSet(key string, index int64, value interface{}) *StatusCmd + LTrim(key string, start, stop int64) *StatusCmd + RPop(key string) *StringCmd + RPopLPush(source, destination string) *StringCmd + RPush(key string, values ...interface{}) *IntCmd + RPushX(key string, value interface{}) *IntCmd + SAdd(key string, members ...interface{}) *IntCmd + SCard(key string) *IntCmd + SDiff(keys ...string) *StringSliceCmd + SDiffStore(destination string, keys ...string) *IntCmd + SInter(keys ...string) *StringSliceCmd + SInterStore(destination string, keys ...string) *IntCmd + SIsMember(key string, member interface{}) *BoolCmd + SMembers(key string) *StringSliceCmd + SMembersMap(key string) *StringStructMapCmd + SMove(source, destination string, member interface{}) *BoolCmd + SPop(key string) *StringCmd + SPopN(key string, count int64) *StringSliceCmd + SRandMember(key string) *StringCmd + SRandMemberN(key string, count int64) *StringSliceCmd + SRem(key string, members ...interface{}) *IntCmd + SUnion(keys ...string) *StringSliceCmd + SUnionStore(destination string, keys ...string) *IntCmd + XAdd(a *XAddArgs) *StringCmd + XDel(stream string, ids ...string) *IntCmd + XLen(stream string) *IntCmd + XRange(stream, start, stop string) *XMessageSliceCmd + 
XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd + XRevRange(stream string, start, stop string) *XMessageSliceCmd + XRevRangeN(stream string, start, stop string, count int64) *XMessageSliceCmd + XRead(a *XReadArgs) *XStreamSliceCmd + XReadStreams(streams ...string) *XStreamSliceCmd + XGroupCreate(stream, group, start string) *StatusCmd + XGroupCreateMkStream(stream, group, start string) *StatusCmd + XGroupSetID(stream, group, start string) *StatusCmd + XGroupDestroy(stream, group string) *IntCmd + XGroupDelConsumer(stream, group, consumer string) *IntCmd + XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd + XAck(stream, group string, ids ...string) *IntCmd + XPending(stream, group string) *XPendingCmd + XPendingExt(a *XPendingExtArgs) *XPendingExtCmd + XClaim(a *XClaimArgs) *XMessageSliceCmd + XClaimJustID(a *XClaimArgs) *StringSliceCmd + XTrim(key string, maxLen int64) *IntCmd + XTrimApprox(key string, maxLen int64) *IntCmd + BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd + BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd + ZAdd(key string, members ...Z) *IntCmd + ZAddNX(key string, members ...Z) *IntCmd + ZAddXX(key string, members ...Z) *IntCmd + ZAddCh(key string, members ...Z) *IntCmd + ZAddNXCh(key string, members ...Z) *IntCmd + ZAddXXCh(key string, members ...Z) *IntCmd + ZIncr(key string, member Z) *FloatCmd + ZIncrNX(key string, member Z) *FloatCmd + ZIncrXX(key string, member Z) *FloatCmd + ZCard(key string) *IntCmd + ZCount(key, min, max string) *IntCmd + ZLexCount(key, min, max string) *IntCmd + ZIncrBy(key string, increment float64, member string) *FloatCmd + ZInterStore(destination string, store ZStore, keys ...string) *IntCmd + ZPopMax(key string, count ...int64) *ZSliceCmd + ZPopMin(key string, count ...int64) *ZSliceCmd + ZRange(key string, start, stop int64) *StringSliceCmd + ZRangeWithScores(key string, start, stop int64) *ZSliceCmd + ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd + 
ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd + ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd + ZRank(key, member string) *IntCmd + ZRem(key string, members ...interface{}) *IntCmd + ZRemRangeByRank(key string, start, stop int64) *IntCmd + ZRemRangeByScore(key, min, max string) *IntCmd + ZRemRangeByLex(key, min, max string) *IntCmd + ZRevRange(key string, start, stop int64) *StringSliceCmd + ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd + ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd + ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd + ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd + ZRevRank(key, member string) *IntCmd + ZScore(key, member string) *FloatCmd + ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd + PFAdd(key string, els ...interface{}) *IntCmd + PFCount(keys ...string) *IntCmd + PFMerge(dest string, keys ...string) *StatusCmd + BgRewriteAOF() *StatusCmd + BgSave() *StatusCmd + ClientKill(ipPort string) *StatusCmd + ClientKillByFilter(keys ...string) *IntCmd + ClientList() *StringCmd + ClientPause(dur time.Duration) *BoolCmd + ClientID() *IntCmd + ConfigGet(parameter string) *SliceCmd + ConfigResetStat() *StatusCmd + ConfigSet(parameter, value string) *StatusCmd + ConfigRewrite() *StatusCmd + DBSize() *IntCmd + FlushAll() *StatusCmd + FlushAllAsync() *StatusCmd + FlushDB() *StatusCmd + FlushDBAsync() *StatusCmd + Info(section ...string) *StringCmd + LastSave() *IntCmd + Save() *StatusCmd + Shutdown() *StatusCmd + ShutdownSave() *StatusCmd + ShutdownNoSave() *StatusCmd + SlaveOf(host, port string) *StatusCmd + Time() *TimeCmd + Eval(script string, keys []string, args ...interface{}) *Cmd + EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd + ScriptExists(hashes ...string) *BoolSliceCmd + ScriptFlush() *StatusCmd + ScriptKill() *StatusCmd + ScriptLoad(script string) *StringCmd + DebugObject(key string) *StringCmd + Publish(channel string, message 
interface{}) *IntCmd + PubSubChannels(pattern string) *StringSliceCmd + PubSubNumSub(channels ...string) *StringIntMapCmd + PubSubNumPat() *IntCmd + ClusterSlots() *ClusterSlotsCmd + ClusterNodes() *StringCmd + ClusterMeet(host, port string) *StatusCmd + ClusterForget(nodeID string) *StatusCmd + ClusterReplicate(nodeID string) *StatusCmd + ClusterResetSoft() *StatusCmd + ClusterResetHard() *StatusCmd + ClusterInfo() *StringCmd + ClusterKeySlot(key string) *IntCmd + ClusterGetKeysInSlot(slot int, count int) *StringSliceCmd + ClusterCountFailureReports(nodeID string) *IntCmd + ClusterCountKeysInSlot(slot int) *IntCmd + ClusterDelSlots(slots ...int) *StatusCmd + ClusterDelSlotsRange(min, max int) *StatusCmd + ClusterSaveConfig() *StatusCmd + ClusterSlaves(nodeID string) *StringSliceCmd + ClusterFailover() *StatusCmd + ClusterAddSlots(slots ...int) *StatusCmd + ClusterAddSlotsRange(min, max int) *StatusCmd + GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd + GeoPos(key string, members ...string) *GeoPosCmd + GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusRO(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusByMemberRO(key, member string, query *GeoRadiusQuery) *GeoLocationCmd + GeoDist(key string, member1, member2, unit string) *FloatCmd + GeoHash(key string, members ...string) *StringSliceCmd + ReadOnly() *StatusCmd + ReadWrite() *StatusCmd + MemoryUsage(key string, samples ...int) *IntCmd +} + +type StatefulCmdable interface { + Cmdable + Auth(password string) *StatusCmd + Select(index int) *StatusCmd + SwapDB(index1, index2 int) *StatusCmd + ClientSetName(name string) *BoolCmd +} + +var _ Cmdable = (*Client)(nil) +var _ Cmdable = (*Tx)(nil) +var _ Cmdable = (*Ring)(nil) +var _ Cmdable = (*ClusterClient)(nil) + +type cmdable struct { + process func(cmd Cmder) error +} + +func (c *cmdable) 
setProcessor(fn func(Cmder) error) { + c.process = fn +} + +type statefulCmdable struct { + cmdable + process func(cmd Cmder) error +} + +func (c *statefulCmdable) setProcessor(fn func(Cmder) error) { + c.process = fn + c.cmdable.setProcessor(fn) +} + +//------------------------------------------------------------------------------ + +func (c *statefulCmdable) Auth(password string) *StatusCmd { + cmd := NewStatusCmd("auth", password) + c.process(cmd) + return cmd +} + +func (c *cmdable) Echo(message interface{}) *StringCmd { + cmd := NewStringCmd("echo", message) + c.process(cmd) + return cmd +} + +func (c *cmdable) Ping() *StatusCmd { + cmd := NewStatusCmd("ping") + c.process(cmd) + return cmd +} + +func (c *cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd { + cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond)) + c.process(cmd) + return cmd +} + +func (c *cmdable) Quit() *StatusCmd { + panic("not implemented") +} + +func (c *statefulCmdable) Select(index int) *StatusCmd { + cmd := NewStatusCmd("select", index) + c.process(cmd) + return cmd +} + +func (c *statefulCmdable) SwapDB(index1, index2 int) *StatusCmd { + cmd := NewStatusCmd("swapdb", index1, index2) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) Command() *CommandsInfoCmd { + cmd := NewCommandsInfoCmd("command") + c.process(cmd) + return cmd +} + +func (c *cmdable) Del(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "del" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Unlink(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "unlink" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) Dump(key string) *StringCmd { + cmd := NewStringCmd("dump", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Exists(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "exists" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Expire(key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd("expire", key, formatSec(expiration)) + c.process(cmd) + return cmd +} + +func (c *cmdable) ExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd("expireat", key, tm.Unix()) + c.process(cmd) + return cmd +} + +func (c *cmdable) Keys(pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd("keys", pattern) + c.process(cmd) + return cmd +} + +func (c *cmdable) Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd { + cmd := NewStatusCmd( + "migrate", + host, + port, + key, + db, + formatMs(timeout), + ) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) Move(key string, db int64) *BoolCmd { + cmd := NewBoolCmd("move", key, db) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectRefCount(key string) *IntCmd { + cmd := NewIntCmd("object", "refcount", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectEncoding(key string) *StringCmd { + cmd := NewStringCmd("object", "encoding", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectIdleTime(key string) *DurationCmd { + cmd := NewDurationCmd(time.Second, "object", "idletime", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Persist(key string) *BoolCmd { + cmd := NewBoolCmd("persist", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) PExpire(key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd("pexpire", key, formatMs(expiration)) + c.process(cmd) + return cmd +} + +func (c *cmdable) PExpireAt(key string, tm 
time.Time) *BoolCmd { + cmd := NewBoolCmd( + "pexpireat", + key, + tm.UnixNano()/int64(time.Millisecond), + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) PTTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Millisecond, "pttl", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) RandomKey() *StringCmd { + cmd := NewStringCmd("randomkey") + c.process(cmd) + return cmd +} + +func (c *cmdable) Rename(key, newkey string) *StatusCmd { + cmd := NewStatusCmd("rename", key, newkey) + c.process(cmd) + return cmd +} + +func (c *cmdable) RenameNX(key, newkey string) *BoolCmd { + cmd := NewBoolCmd("renamenx", key, newkey) + c.process(cmd) + return cmd +} + +func (c *cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "restore", + key, + formatMs(ttl), + value, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "restore", + key, + formatMs(ttl), + value, + "replace", + ) + c.process(cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count int64 + Get []string + Order string + Alpha bool +} + +func (sort *Sort) args(key string) []interface{} { + args := []interface{}{"sort", key} + if sort.By != "" { + args = append(args, "by", sort.By) + } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "limit", sort.Offset, sort.Count) + } + for _, get := range sort.Get { + args = append(args, "get", get) + } + if sort.Order != "" { + args = append(args, sort.Order) + } + if sort.Alpha { + args = append(args, "alpha") + } + return args +} + +func (c *cmdable) Sort(key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(sort.args(key)...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SortStore(key, store string, sort *Sort) *IntCmd { + args := sort.args(key) + if store != "" { + args = append(args, "store", store) + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) SortInterfaces(key string, sort *Sort) *SliceCmd { + cmd := NewSliceCmd(sort.args(key)...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Touch(keys ...string) *IntCmd { + args := make([]interface{}, len(keys)+1) + args[0] = "touch" + for i, key := range keys { + args[i+1] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) TTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Second, "ttl", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Type(key string) *StatusCmd { + cmd := NewStatusCmd("type", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"sscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"hscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"zscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) 
+ c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) Append(key, value string) *IntCmd { + cmd := NewIntCmd("append", key, value) + c.process(cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c *cmdable) BitCount(key string, bitCount *BitCount) *IntCmd { + args := []interface{}{"bitcount", key} + if bitCount != nil { + args = append( + args, + bitCount.Start, + bitCount.End, + ) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) bitOp(op, destKey string, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "bitop" + args[1] = op + args[2] = destKey + for i, key := range keys { + args[3+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd { + return c.bitOp("and", destKey, keys...) +} + +func (c *cmdable) BitOpOr(destKey string, keys ...string) *IntCmd { + return c.bitOp("or", destKey, keys...) +} + +func (c *cmdable) BitOpXor(destKey string, keys ...string) *IntCmd { + return c.bitOp("xor", destKey, keys...) +} + +func (c *cmdable) BitOpNot(destKey string, key string) *IntCmd { + return c.bitOp("not", destKey, key) +} + +func (c *cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd { + args := make([]interface{}, 3+len(pos)) + args[0] = "bitpos" + args[1] = key + args[2] = bit + switch len(pos) { + case 0: + case 1: + args[3] = pos[0] + case 2: + args[3] = pos[0] + args[4] = pos[1] + default: + panic("too many arguments") + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Decr(key string) *IntCmd { + cmd := NewIntCmd("decr", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) DecrBy(key string, decrement int64) *IntCmd { + cmd := NewIntCmd("decrby", key, decrement) + c.process(cmd) + return cmd +} + +// Redis `GET key` command. 
It returns redis.Nil error when key does not exist. +func (c *cmdable) Get(key string) *StringCmd { + cmd := NewStringCmd("get", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetBit(key string, offset int64) *IntCmd { + cmd := NewIntCmd("getbit", key, offset) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetRange(key string, start, end int64) *StringCmd { + cmd := NewStringCmd("getrange", key, start, end) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetSet(key string, value interface{}) *StringCmd { + cmd := NewStringCmd("getset", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) Incr(key string) *IntCmd { + cmd := NewIntCmd("incr", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) IncrBy(key string, value int64) *IntCmd { + cmd := NewIntCmd("incrby", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) IncrByFloat(key string, value float64) *FloatCmd { + cmd := NewFloatCmd("incrbyfloat", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) MGet(keys ...string) *SliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "mget" + for i, key := range keys { + args[1+i] = key + } + cmd := NewSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) MSet(pairs ...interface{}) *StatusCmd { + args := make([]interface{}, 1, 1+len(pairs)) + args[0] = "mset" + args = appendArgs(args, pairs) + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) MSetNX(pairs ...interface{}) *BoolCmd { + args := make([]interface{}, 1, 1+len(pairs)) + args[0] = "msetnx" + args = appendArgs(args, pairs) + cmd := NewBoolCmd(args...) + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration]` command. +// +// Use expiration for `SETEX`-like behavior. +// Zero expiration means the key has no expiration time. 
+func (c *cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd { + args := make([]interface{}, 3, 4) + args[0] = "set" + args[1] = key + args[2] = value + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(expiration)) + } else { + args = append(args, "ex", formatSec(expiration)) + } + } + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SetBit(key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + "setbit", + key, + offset, + value, + ) + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration] NX` command. +// +// Zero expiration means the key has no expiration time. +func (c *cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if expiration == 0 { + // Use old `SETNX` to support old Redis versions. + cmd = NewBoolCmd("setnx", key, value) + } else { + if usePrecise(expiration) { + cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx") + } else { + cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx") + } + } + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration] XX` command. +// +// Zero expiration means the key has no expiration time. 
+func (c *cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if expiration == 0 { + cmd = NewBoolCmd("set", key, value, "xx") + } else { + if usePrecise(expiration) { + cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx") + } else { + cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx") + } + } + c.process(cmd) + return cmd +} + +func (c *cmdable) SetRange(key string, offset int64, value string) *IntCmd { + cmd := NewIntCmd("setrange", key, offset, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) StrLen(key string) *IntCmd { + cmd := NewIntCmd("strlen", key) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) HDel(key string, fields ...string) *IntCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hdel" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) HExists(key, field string) *BoolCmd { + cmd := NewBoolCmd("hexists", key, field) + c.process(cmd) + return cmd +} + +func (c *cmdable) HGet(key, field string) *StringCmd { + cmd := NewStringCmd("hget", key, field) + c.process(cmd) + return cmd +} + +func (c *cmdable) HGetAll(key string) *StringStringMapCmd { + cmd := NewStringStringMapCmd("hgetall", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HIncrBy(key, field string, incr int64) *IntCmd { + cmd := NewIntCmd("hincrby", key, field, incr) + c.process(cmd) + return cmd +} + +func (c *cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd { + cmd := NewFloatCmd("hincrbyfloat", key, field, incr) + c.process(cmd) + return cmd +} + +func (c *cmdable) HKeys(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("hkeys", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HLen(key string) *IntCmd { + cmd := NewIntCmd("hlen", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HMGet(key string, fields ...string) *SliceCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hmget" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) HMSet(key string, fields map[string]interface{}) *StatusCmd { + args := make([]interface{}, 2+len(fields)*2) + args[0] = "hmset" + args[1] = key + i := 2 + for k, v := range fields { + args[i] = k + args[i+1] = v + i += 2 + } + cmd := NewStatusCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) HSet(key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd("hset", key, field, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) HSetNX(key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd("hsetnx", key, field, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) HVals(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("hvals", key) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "blpop" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(timeout) + cmd := NewStringSliceCmd(args...) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "brpop" + for i, key := range keys { + args[1+i] = key + } + args[len(keys)+1] = formatSec(timeout) + cmd := NewStringSliceCmd(args...) 
+ cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd { + cmd := NewStringCmd( + "brpoplpush", + source, + destination, + formatSec(timeout), + ) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) LIndex(key string, index int64) *StringCmd { + cmd := NewStringCmd("lindex", key, index) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, op, pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, "before", pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, "after", pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LLen(key string) *IntCmd { + cmd := NewIntCmd("llen", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) LPop(key string) *StringCmd { + cmd := NewStringCmd("lpop", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) LPush(key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) LPushX(key string, value interface{}) *IntCmd { + cmd := NewIntCmd("lpushx", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd( + "lrange", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) LRem(key string, count int64, value interface{}) *IntCmd { + cmd := NewIntCmd("lrem", key, count, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LSet(key string, index int64, value interface{}) *StatusCmd { + cmd := NewStatusCmd("lset", key, index, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LTrim(key string, start, stop int64) *StatusCmd { + cmd := NewStatusCmd( + "ltrim", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPop(key string) *StringCmd { + cmd := NewStringCmd("rpop", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPopLPush(source, destination string) *StringCmd { + cmd := NewStringCmd("rpoplpush", source, destination) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPush(key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPushX(key string, value interface{}) *IntCmd { + cmd := NewIntCmd("rpushx", key, value) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) SAdd(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "sadd" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) SCard(key string) *IntCmd { + cmd := NewIntCmd("scard", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) SDiff(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sdiff" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SDiffStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sdiffstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SInter(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sinter" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SInterStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sinterstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SIsMember(key string, member interface{}) *BoolCmd { + cmd := NewBoolCmd("sismember", key, member) + c.process(cmd) + return cmd +} + +// Redis `SMEMBERS key` command output as a slice +func (c *cmdable) SMembers(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("smembers", key) + c.process(cmd) + return cmd +} + +// Redis `SMEMBERS key` command output as a map +func (c *cmdable) SMembersMap(key string) *StringStructMapCmd { + cmd := NewStringStructMapCmd("smembers", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) SMove(source, destination string, member interface{}) *BoolCmd { + cmd := NewBoolCmd("smove", source, destination, member) + c.process(cmd) + return cmd +} + +// Redis `SPOP key` command. 
+func (c *cmdable) SPop(key string) *StringCmd { + cmd := NewStringCmd("spop", key) + c.process(cmd) + return cmd +} + +// Redis `SPOP key count` command. +func (c *cmdable) SPopN(key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd("spop", key, count) + c.process(cmd) + return cmd +} + +// Redis `SRANDMEMBER key` command. +func (c *cmdable) SRandMember(key string) *StringCmd { + cmd := NewStringCmd("srandmember", key) + c.process(cmd) + return cmd +} + +// Redis `SRANDMEMBER key count` command. +func (c *cmdable) SRandMemberN(key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd("srandmember", key, count) + c.process(cmd) + return cmd +} + +func (c *cmdable) SRem(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "srem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SUnion(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sunion" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SUnionStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sunionstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +type XAddArgs struct { + Stream string + MaxLen int64 // MAXLEN N + MaxLenApprox int64 // MAXLEN ~ N + ID string + Values map[string]interface{} +} + +func (c *cmdable) XAdd(a *XAddArgs) *StringCmd { + args := make([]interface{}, 0, 6+len(a.Values)*2) + args = append(args, "xadd") + args = append(args, a.Stream) + if a.MaxLen > 0 { + args = append(args, "maxlen", a.MaxLen) + } else if a.MaxLenApprox > 0 { + args = append(args, "maxlen", "~", a.MaxLenApprox) + } + if a.ID != "" { + args = append(args, a.ID) + } else { + args = append(args, "*") + } + for k, v := range a.Values { + args = append(args, k) + args = append(args, v) + } + + cmd := NewStringCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) XDel(stream string, ids ...string) *IntCmd { + args := []interface{}{"xdel", stream} + for _, id := range ids { + args = append(args, id) + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) XLen(stream string) *IntCmd { + cmd := NewIntCmd("xlen", stream) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRange(stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrange", stream, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrange", stream, start, stop, "count", count) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRevRange(stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRevRangeN(stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop, "count", count) + c.process(cmd) + return cmd +} + +type XReadArgs struct { + Streams []string + Count int64 + Block time.Duration +} + +func (c *cmdable) XRead(a *XReadArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 5+len(a.Streams)) + args = append(args, "xread") + if a.Count > 0 { + args = append(args, "count") + args = append(args, a.Count) + } + if a.Block >= 0 { + args = append(args, "block") + args = append(args, int64(a.Block/time.Millisecond)) + } + args = append(args, "streams") + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(args...) 
+ if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + c.process(cmd) + return cmd +} + +func (c *cmdable) XReadStreams(streams ...string) *XStreamSliceCmd { + return c.XRead(&XReadArgs{ + Streams: streams, + Block: -1, + }) +} + +func (c *cmdable) XGroupCreate(stream, group, start string) *StatusCmd { + cmd := NewStatusCmd("xgroup", "create", stream, group, start) + c.process(cmd) + return cmd +} + +func (c *cmdable) XGroupCreateMkStream(stream, group, start string) *StatusCmd { + cmd := NewStatusCmd("xgroup", "create", stream, group, start, "mkstream") + c.process(cmd) + return cmd +} + +func (c *cmdable) XGroupSetID(stream, group, start string) *StatusCmd { + cmd := NewStatusCmd("xgroup", "setid", stream, group, start) + c.process(cmd) + return cmd +} + +func (c *cmdable) XGroupDestroy(stream, group string) *IntCmd { + cmd := NewIntCmd("xgroup", "destroy", stream, group) + c.process(cmd) + return cmd +} + +func (c *cmdable) XGroupDelConsumer(stream, group, consumer string) *IntCmd { + cmd := NewIntCmd("xgroup", "delconsumer", stream, group, consumer) + c.process(cmd) + return cmd +} + +type XReadGroupArgs struct { + Group string + Consumer string + // List of streams and ids. + Streams []string + Count int64 + Block time.Duration + NoAck bool +} + +func (c *cmdable) XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 8+len(a.Streams)) + args = append(args, "xreadgroup", "group", a.Group, a.Consumer) + if a.Count > 0 { + args = append(args, "count", a.Count) + } + if a.Block >= 0 { + args = append(args, "block", int64(a.Block/time.Millisecond)) + } + if a.NoAck { + args = append(args, "noack") + } + args = append(args, "streams") + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(args...) 
+ if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + c.process(cmd) + return cmd +} + +func (c *cmdable) XAck(stream, group string, ids ...string) *IntCmd { + args := []interface{}{"xack", stream, group} + for _, id := range ids { + args = append(args, id) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) XPending(stream, group string) *XPendingCmd { + cmd := NewXPendingCmd("xpending", stream, group) + c.process(cmd) + return cmd +} + +type XPendingExtArgs struct { + Stream string + Group string + Start string + End string + Count int64 + Consumer string +} + +func (c *cmdable) XPendingExt(a *XPendingExtArgs) *XPendingExtCmd { + args := make([]interface{}, 0, 7) + args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count) + if a.Consumer != "" { + args = append(args, a.Consumer) + } + cmd := NewXPendingExtCmd(args...) + c.process(cmd) + return cmd +} + +type XClaimArgs struct { + Stream string + Group string + Consumer string + MinIdle time.Duration + Messages []string +} + +func (c *cmdable) XClaim(a *XClaimArgs) *XMessageSliceCmd { + args := xClaimArgs(a) + cmd := NewXMessageSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) XClaimJustID(a *XClaimArgs) *StringSliceCmd { + args := xClaimArgs(a) + args = append(args, "justid") + cmd := NewStringSliceCmd(args...) 
+ c.process(cmd) + return cmd +} + +func xClaimArgs(a *XClaimArgs) []interface{} { + args := make([]interface{}, 0, 4+len(a.Messages)) + args = append(args, + "xclaim", + a.Stream, + a.Group, a.Consumer, + int64(a.MinIdle/time.Millisecond)) + for _, id := range a.Messages { + args = append(args, id) + } + return args +} + +func (c *cmdable) XTrim(key string, maxLen int64) *IntCmd { + cmd := NewIntCmd("xtrim", key, "maxlen", maxLen) + c.process(cmd) + return cmd +} + +func (c *cmdable) XTrimApprox(key string, maxLen int64) *IntCmd { + cmd := NewIntCmd("xtrim", key, "maxlen", "~", maxLen) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +// Z represents sorted set member. +type Z struct { + Score float64 + Member interface{} +} + +// ZWithKey represents sorted set member including the name of the key where it was popped. +type ZWithKey struct { + Z + Key string +} + +// ZStore is used as an arg to ZInterStore and ZUnionStore. +type ZStore struct { + Weights []float64 + // Can be SUM, MIN or MAX. + Aggregate string +} + +// Redis `BZPOPMAX key [key ...] timeout` command. +func (c *cmdable) BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmax" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(timeout) + cmd := NewZWithKeyCmd(args...) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +// Redis `BZPOPMIN key [key ...] timeout` command. +func (c *cmdable) BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmin" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(timeout) + cmd := NewZWithKeyCmd(args...) 
+ cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) zAdd(a []interface{}, n int, members ...Z) *IntCmd { + for i, m := range members { + a[n+2*i] = m.Score + a[n+2*i+1] = m.Member + } + cmd := NewIntCmd(a...) + c.process(cmd) + return cmd +} + +// Redis `ZADD key score member [score member ...]` command. +func (c *cmdable) ZAdd(key string, members ...Z) *IntCmd { + const n = 2 + a := make([]interface{}, n+2*len(members)) + a[0], a[1] = "zadd", key + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key NX score member [score member ...]` command. +func (c *cmdable) ZAddNX(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "nx" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key XX score member [score member ...]` command. +func (c *cmdable) ZAddXX(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "xx" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key CH score member [score member ...]` command. +func (c *cmdable) ZAddCh(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "ch" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key NX CH score member [score member ...]` command. +func (c *cmdable) ZAddNXCh(key string, members ...Z) *IntCmd { + const n = 4 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key XX CH score member [score member ...]` command. +func (c *cmdable) ZAddXXCh(key string, members ...Z) *IntCmd { + const n = 4 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch" + return c.zAdd(a, n, members...) 
+} + +func (c *cmdable) zIncr(a []interface{}, n int, members ...Z) *FloatCmd { + for i, m := range members { + a[n+2*i] = m.Score + a[n+2*i+1] = m.Member + } + cmd := NewFloatCmd(a...) + c.process(cmd) + return cmd +} + +// Redis `ZADD key INCR score member` command. +func (c *cmdable) ZIncr(key string, member Z) *FloatCmd { + const n = 3 + a := make([]interface{}, n+2) + a[0], a[1], a[2] = "zadd", key, "incr" + return c.zIncr(a, n, member) +} + +// Redis `ZADD key NX INCR score member` command. +func (c *cmdable) ZIncrNX(key string, member Z) *FloatCmd { + const n = 4 + a := make([]interface{}, n+2) + a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx" + return c.zIncr(a, n, member) +} + +// Redis `ZADD key XX INCR score member` command. +func (c *cmdable) ZIncrXX(key string, member Z) *FloatCmd { + const n = 4 + a := make([]interface{}, n+2) + a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx" + return c.zIncr(a, n, member) +} + +func (c *cmdable) ZCard(key string) *IntCmd { + cmd := NewIntCmd("zcard", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZCount(key, min, max string) *IntCmd { + cmd := NewIntCmd("zcount", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZLexCount(key, min, max string) *IntCmd { + cmd := NewIntCmd("zlexcount", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd { + cmd := NewFloatCmd("zincrby", key, increment, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZInterStore(destination string, store ZStore, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "zinterstore" + args[1] = destination + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + if len(store.Weights) > 0 { + args = append(args, "weights") + for _, weight := range store.Weights { + args = append(args, weight) + } + } + if store.Aggregate != "" { + args = append(args, "aggregate", store.Aggregate) + } 
+ cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZPopMax(key string, count ...int64) *ZSliceCmd { + args := []interface{}{ + "zpopmax", + key, + } + + switch len(count) { + case 0: + break + case 1: + args = append(args, count[0]) + default: + panic("too many arguments") + } + + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZPopMin(key string, count ...int64) *ZSliceCmd { + args := []interface{}{ + "zpopmin", + key, + } + + switch len(count) { + case 0: + break + case 1: + args = append(args, count[0]) + default: + panic("too many arguments") + } + + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd { + args := []interface{}{ + "zrange", + key, + start, + stop, + } + if withScores { + args = append(args, "withscores") + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRange(key string, start, stop int64) *StringSliceCmd { + return c.zRange(key, start, stop, false) +} + +func (c *cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd("zrange", key, start, stop, "withscores") + c.process(cmd) + return cmd +} + +type ZRangeBy struct { + Min, Max string + Offset, Count int64 +} + +func (c *cmdable) zRangeBy(zcmd, key string, opt ZRangeBy, withScores bool) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Min, opt.Max} + if withScores { + args = append(args, "withscores") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRangeBy("zrangebyscore", key, opt, false) +} + +func (c *cmdable) ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRangeBy("zrangebylex", key, opt, false) +} + +func (c *cmdable) ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRank(key, member string) *IntCmd { + cmd := NewIntCmd("zrank", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRem(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "zrem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd { + cmd := NewIntCmd( + "zremrangebyrank", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByScore(key, min, max string) *IntCmd { + cmd := NewIntCmd("zremrangebyscore", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByLex(key, min, max string) *IntCmd { + cmd := NewIntCmd("zremrangebylex", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd("zrevrange", key, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores") + c.process(cmd) + return cmd +} + +func (c *cmdable) zRevRangeBy(zcmd, key string, opt ZRangeBy) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Max, 
opt.Min} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy("zrevrangebyscore", key, opt) +} + +func (c *cmdable) ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy("zrevrangebylex", key, opt) +} + +func (c *cmdable) ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRank(key, member string) *IntCmd { + cmd := NewIntCmd("zrevrank", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZScore(key, member string) *FloatCmd { + cmd := NewFloatCmd("zscore", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "zunionstore" + args[1] = dest + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + if len(store.Weights) > 0 { + args = append(args, "weights") + for _, weight := range store.Weights { + args = append(args, weight) + } + } + if store.Aggregate != "" { + args = append(args, "aggregate", store.Aggregate) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) PFAdd(key string, els ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(els)) + args[0] = "pfadd" + args[1] = key + args = appendArgs(args, els) + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) PFCount(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "pfcount" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) PFMerge(dest string, keys ...string) *StatusCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "pfmerge" + args[1] = dest + for i, key := range keys { + args[2+i] = key + } + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) BgRewriteAOF() *StatusCmd { + cmd := NewStatusCmd("bgrewriteaof") + c.process(cmd) + return cmd +} + +func (c *cmdable) BgSave() *StatusCmd { + cmd := NewStatusCmd("bgsave") + c.process(cmd) + return cmd +} + +func (c *cmdable) ClientKill(ipPort string) *StatusCmd { + cmd := NewStatusCmd("client", "kill", ipPort) + c.process(cmd) + return cmd +} + +// ClientKillByFilter is new style synx, while the ClientKill is old +// CLIENT KILL