@@ -8,7 +8,6 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
controllermanager "kubesphere.io/kubesphere/cmd/controller-manager/app"
|
||||
ksapigateway "kubesphere.io/kubesphere/cmd/ks-apigateway/app"
|
||||
ksapiserver "kubesphere.io/kubesphere/cmd/ks-apiserver/app"
|
||||
"os"
|
||||
)
|
||||
@@ -45,12 +44,10 @@ func commandFor(basename string, defaultCommand *cobra.Command, commands []func(
|
||||
func NewHyperSphereCommand() (*cobra.Command, []func() *cobra.Command) {
|
||||
apiserver := func() *cobra.Command { return ksapiserver.NewAPIServerCommand() }
|
||||
controllermanager := func() *cobra.Command { return controllermanager.NewControllerManagerCommand() }
|
||||
apigateway := func() *cobra.Command { return ksapigateway.NewAPIGatewayCommand() }
|
||||
|
||||
commandFns := []func() *cobra.Command{
|
||||
apiserver,
|
||||
controllermanager,
|
||||
apigateway,
|
||||
}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"kubesphere.io/kubesphere/cmd/ks-apigateway/app"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
cmd := app.NewAPIGatewayCommand()
|
||||
|
||||
if err := cmd.Execute(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"github.com/mholt/caddy/caddy/caddymain"
|
||||
"github.com/mholt/caddy/caddyhttp/httpserver"
|
||||
"github.com/spf13/cobra"
|
||||
"kubesphere.io/kubesphere/pkg/utils/signals"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apigateway"
|
||||
)
|
||||
|
||||
func NewAPIGatewayCommand() *cobra.Command {
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "ks-apigateway",
|
||||
Long: `The KubeSphere API Gateway, which is responsible
|
||||
for proxy request to the right backend. API Gateway also proxy
|
||||
Kubernetes API Server for KubeSphere authorization purpose.
|
||||
`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
apigateway.RegisterPlugins()
|
||||
|
||||
return Run(signals.SetupSignalHandler())
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().AddGoFlagSet(flag.CommandLine)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func Run(stopCh <-chan struct{}) error {
|
||||
httpserver.RegisterDevDirective("authenticate", "jwt")
|
||||
httpserver.RegisterDevDirective("authentication", "jwt")
|
||||
httpserver.RegisterDevDirective("swagger", "jwt")
|
||||
caddymain.Run()
|
||||
|
||||
return nil
|
||||
}
|
||||
11
go.mod
11
go.mod
@@ -55,7 +55,6 @@ require (
|
||||
github.com/kelseyhightower/envconfig v1.4.0 // indirect
|
||||
github.com/kiali/kiali v0.15.1-0.20191210080139-edbbad1ef779
|
||||
github.com/klauspost/cpuid v1.2.1 // indirect
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
|
||||
github.com/kubernetes-sigs/application v0.0.0-20191210100950-18cc93526ab4
|
||||
github.com/kubesphere/sonargo v0.0.2
|
||||
github.com/leodido/go-urn v1.1.0 // indirect
|
||||
@@ -69,6 +68,7 @@ require (
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
|
||||
github.com/onsi/ginkgo v1.8.0
|
||||
github.com/onsi/gomega v1.5.0
|
||||
github.com/open-policy-agent/opa v0.18.0
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1
|
||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
||||
github.com/openshift/api v3.9.0+incompatible // indirect
|
||||
@@ -84,7 +84,6 @@ require (
|
||||
github.com/xanzy/ssh-agent v0.2.1 // indirect
|
||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
|
||||
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 // indirect
|
||||
google.golang.org/grpc v1.23.1
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
|
||||
gopkg.in/go-playground/validator.v9 v9.29.1 // indirect
|
||||
@@ -208,6 +207,7 @@ replace (
|
||||
github.com/go-sql-driver/mysql => github.com/go-sql-driver/mysql v1.4.1
|
||||
github.com/go-stack/stack => github.com/go-stack/stack v1.8.0
|
||||
github.com/gobuffalo/flect => github.com/gobuffalo/flect v0.1.5
|
||||
github.com/gobwas/glob => github.com/gobwas/glob v0.2.3
|
||||
github.com/gocraft/dbr => github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6
|
||||
github.com/gofrs/uuid => github.com/gofrs/uuid v3.2.0+incompatible
|
||||
github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.0
|
||||
@@ -277,6 +277,7 @@ replace (
|
||||
github.com/marten-seemann/qtls => github.com/marten-seemann/qtls v0.2.3
|
||||
github.com/mattn/go-colorable => github.com/mattn/go-colorable v0.1.2
|
||||
github.com/mattn/go-isatty => github.com/mattn/go-isatty v0.0.8
|
||||
github.com/mattn/go-runewidth => github.com/mattn/go-runewidth v0.0.0-20181025052659-b20a3daf6a39
|
||||
github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.11.0
|
||||
github.com/matttproud/golang_protobuf_extensions => github.com/matttproud/golang_protobuf_extensions v1.0.1
|
||||
github.com/mholt/caddy => github.com/mholt/caddy v1.0.0
|
||||
@@ -284,6 +285,7 @@ replace (
|
||||
github.com/miekg/dns => github.com/miekg/dns v1.1.9
|
||||
github.com/mitchellh/go-homedir => github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/mitchellh/mapstructure => github.com/mitchellh/mapstructure v1.1.2
|
||||
github.com/mna/pigeon => github.com/mna/pigeon v0.0.0-20180808201053-bb0192cfc2ae
|
||||
github.com/modern-go/concurrent => github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
|
||||
github.com/modern-go/reflect2 => github.com/modern-go/reflect2 v1.0.1
|
||||
github.com/morikuni/aec => github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c
|
||||
@@ -293,8 +295,10 @@ replace (
|
||||
github.com/naoina/go-stringutil => github.com/naoina/go-stringutil v0.1.0
|
||||
github.com/naoina/toml => github.com/naoina/toml v0.1.1
|
||||
github.com/oklog/ulid => github.com/oklog/ulid v1.3.1
|
||||
github.com/olekukonko/tablewriter => github.com/olekukonko/tablewriter v0.0.1
|
||||
github.com/onsi/ginkgo => github.com/onsi/ginkgo v1.8.0
|
||||
github.com/onsi/gomega => github.com/onsi/gomega v1.5.0
|
||||
github.com/open-policy-agent/opa => github.com/open-policy-agent/opa v0.18.0
|
||||
github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v1.0.0-rc1
|
||||
github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.0.1
|
||||
github.com/openshift/api => github.com/openshift/api v3.9.0+incompatible
|
||||
@@ -302,6 +306,7 @@ replace (
|
||||
github.com/pelletier/go-buffruneio => github.com/pelletier/go-buffruneio v0.2.0
|
||||
github.com/pelletier/go-toml => github.com/pelletier/go-toml v1.2.0
|
||||
github.com/peterbourgon/diskv => github.com/peterbourgon/diskv v2.0.1+incompatible
|
||||
github.com/peterh/liner => github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d
|
||||
github.com/philhofer/fwd => github.com/philhofer/fwd v1.0.0
|
||||
github.com/pkg/errors => github.com/pkg/errors v0.8.1
|
||||
github.com/pmezard/go-difflib => github.com/pmezard/go-difflib v1.0.0
|
||||
@@ -316,6 +321,7 @@ replace (
|
||||
github.com/prometheus/common => github.com/prometheus/common v0.4.0
|
||||
github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084
|
||||
github.com/prometheus/tsdb => github.com/prometheus/tsdb v0.7.1
|
||||
github.com/rcrowley/go-metrics => github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a
|
||||
github.com/remyoudompheng/bigfft => github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446
|
||||
github.com/rogpeppe/fastuuid => github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af
|
||||
github.com/rogpeppe/go-charset => github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4
|
||||
@@ -346,6 +352,7 @@ replace (
|
||||
github.com/xiang90/probing => github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2
|
||||
github.com/xlab/treeprint => github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6
|
||||
github.com/xordataexchange/crypt => github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77
|
||||
github.com/yashtewari/glob-intersection => github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b
|
||||
go.etcd.io/bbolt => go.etcd.io/bbolt v1.3.3
|
||||
go.opencensus.io => go.opencensus.io v0.21.0
|
||||
go.uber.org/atomic => go.uber.org/atomic v1.4.0
|
||||
|
||||
15
go.sum
15
go.sum
@@ -24,6 +24,7 @@ github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiU
|
||||
github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/goquery v1.5.0 h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=
|
||||
github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
|
||||
@@ -59,6 +60,7 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
|
||||
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
|
||||
@@ -176,6 +178,8 @@ github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo=
|
||||
github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6 h1:kumyNm8Vr8cbVm/aLQYTbDE3SKCbbn5HEVoDp/Dyyfc=
|
||||
github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6/go.mod h1:K/9g3pPouf13kP5K7pdriQEJAy272R9yXuWuDIEWJTM=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
@@ -296,6 +300,7 @@ github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-runewidth v0.0.0-20181025052659-b20a3daf6a39/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
|
||||
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
@@ -310,6 +315,7 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mna/pigeon v0.0.0-20180808201053-bb0192cfc2ae/go.mod h1:Iym28+kJVnC1hfQvv5MUtI6AiFFzvQjHcvI4RFTG/04=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
@@ -326,10 +332,13 @@ github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h
|
||||
github.com/naoina/toml v0.1.1 h1:PT/lllxVVN0gzzSqSlHEmP8MJB4MY2U7STGxiouV4X8=
|
||||
github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
|
||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/open-policy-agent/opa v0.18.0 h1:EC81mO3/517Kq5brJHydqKE5MLzJ+4cdJvUQKxLzHy8=
|
||||
github.com/open-policy-agent/opa v0.18.0/go.mod h1:6pC1cMYDI92i9EY/GoA2m+HcZlcCrh3jbfny5F7JVTA=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
||||
@@ -344,6 +353,7 @@ github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
|
||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -369,6 +379,8 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzr
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
|
||||
@@ -386,6 +398,7 @@ github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009 h1:3wBL/e/qjpSYaXacpbIV+Bsj/nwQ4UO1llG/av54zzw=
|
||||
github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009/go.mod h1:dVvZuWJd174umvm5g8CmZD6S2GWwHKtpK/0ZPHswuNo=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/speps/go-hashids v2.0.0+incompatible h1:kSfxGfESueJKTx0mpER9Y/1XHl+FVQjtCqRyYcviFbw=
|
||||
github.com/speps/go-hashids v2.0.0+incompatible/go.mod h1:P7hqPzMdnZOfyIk+xrlG1QaSMw+gCBdHKsBDnhpaZvc=
|
||||
@@ -418,6 +431,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b h1:vVRagRXf67ESqAb72hG2C/ZwI8NtJF2u2V76EsuOHGY=
|
||||
github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b/go.mod h1:HptNXiXVDcJjXe9SqMd0v2FsL9f8dz4GnXgltU6q/co=
|
||||
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
|
||||
@@ -1,248 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
package authenticate
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apigateway/caddy-plugin/internal"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/cache"
|
||||
"log"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/dgrijalva/jwt-go"
|
||||
"github.com/mholt/caddy/caddyhttp/httpserver"
|
||||
)
|
||||
|
||||
// Auth is a caddy HTTP middleware that authenticates requests with JWT
// tokens before handing them to the next handler in the chain.
type Auth struct {
	Rule *Rule              // configuration parsed from the Caddyfile
	Next httpserver.Handler // downstream handler invoked after auth succeeds
}

// Rule holds the configuration of the authenticate middleware.
type Rule struct {
	Secret           []byte                   // HMAC key used to verify token signatures
	Path             string                   // base path protected by this middleware
	RedisOptions     *cache.Options           // connection options for the session store
	TokenIdleTimeout time.Duration            // sliding expiration applied to session tokens
	RedisClient      cache.Interface          // session store client, connected on startup
	ExclusionRules   []internal.ExclusionRule // path/method pairs that bypass authentication
}

// User mirrors the identity fields carried in a token payload.
type User struct {
	Username string                   `json:"username"`
	UID      string                   `json:"uid"`
	Groups   *[]string                `json:"groups,omitempty"`
	Extra    *map[string]interface{}  `json:"extra,omitempty"`
}

// requestInfoFactory resolves Kubernetes-style RequestInfo from request
// paths; "kapis"/"kapi" are KubeSphere-specific API prefixes added to the
// standard "api"/"apis" ones.
var requestInfoFactory = request.RequestInfoFactory{
	APIPrefixes:          sets.NewString("api", "apis", "kapis", "kapi"),
	GrouplessAPIPrefixes: sets.NewString("api")}
|
||||
|
||||
// ServeHTTP implements httpserver.Handler. Requests matching an exclusion
// rule skip authentication entirely; requests matching the protected path
// must present a valid JWT, whose identity is then injected into the
// request context before the chain continues. Authentication failures are
// answered with 401 via HandleUnauthorized.
func (h Auth) ServeHTTP(resp http.ResponseWriter, req *http.Request) (int, error) {
	// Exclusions are evaluated first so public endpoints never require a token.
	for _, rule := range h.Rule.ExclusionRules {
		if httpserver.Path(req.URL.Path).Matches(rule.Path) && (rule.Method == internal.AllMethod || req.Method == rule.Method) {
			return h.Next.ServeHTTP(resp, req)
		}
	}

	if httpserver.Path(req.URL.Path).Matches(h.Rule.Path) {

		uToken, err := h.ExtractToken(req)

		if err != nil {
			return h.HandleUnauthorized(resp, err), nil
		}

		token, err := h.Validate(uToken)

		if err != nil {
			return h.HandleUnauthorized(resp, err), nil
		}

		// Replace req with a copy whose context carries the authenticated user.
		req, err = h.InjectContext(req, token)

		if err != nil {
			return h.HandleUnauthorized(resp, err), nil
		}
	}

	return h.Next.ServeHTTP(resp, req)
}
|
||||
|
||||
func (h Auth) InjectContext(req *http.Request, token *jwt.Token) (*http.Request, error) {
|
||||
|
||||
payload, ok := token.Claims.(jwt.MapClaims)
|
||||
|
||||
if !ok {
|
||||
return nil, errors.New("invalid payload")
|
||||
}
|
||||
|
||||
for header := range req.Header {
|
||||
if strings.HasPrefix(header, "X-Token-") {
|
||||
req.Header.Del(header)
|
||||
}
|
||||
}
|
||||
|
||||
usr := &user.DefaultInfo{}
|
||||
|
||||
username, ok := payload["username"].(string)
|
||||
|
||||
if ok && username != "" {
|
||||
req.Header.Set("X-Token-Username", username)
|
||||
usr.Name = username
|
||||
}
|
||||
|
||||
uid := payload["uid"]
|
||||
|
||||
if uid != nil {
|
||||
switch uid.(type) {
|
||||
case int:
|
||||
req.Header.Set("X-Token-UID", strconv.Itoa(uid.(int)))
|
||||
usr.UID = strconv.Itoa(uid.(int))
|
||||
break
|
||||
case string:
|
||||
req.Header.Set("X-Token-UID", uid.(string))
|
||||
usr.UID = uid.(string)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
groups, ok := payload["groups"].([]string)
|
||||
if ok && len(groups) > 0 {
|
||||
req.Header.Set("X-Token-Groups", strings.Join(groups, ","))
|
||||
usr.Groups = groups
|
||||
}
|
||||
|
||||
// hard code, support jenkins auth plugin
|
||||
if httpserver.Path(req.URL.Path).Matches("/kapis/jenkins.kubesphere.io") ||
|
||||
httpserver.Path(req.URL.Path).Matches("job") ||
|
||||
httpserver.Path(req.URL.Path).Matches("/kapis/devops.kubesphere.io/v1alpha2") {
|
||||
req.SetBasicAuth(username, token.Raw)
|
||||
}
|
||||
|
||||
context := request.WithUser(req.Context(), usr)
|
||||
|
||||
requestInfo, err := requestInfoFactory.NewRequestInfo(req)
|
||||
|
||||
if err == nil {
|
||||
context = request.WithRequestInfo(context, requestInfo)
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req = req.WithContext(context)
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (h Auth) Validate(uToken string) (*jwt.Token, error) {
|
||||
|
||||
if len(uToken) == 0 {
|
||||
return nil, fmt.Errorf("token length is zero")
|
||||
}
|
||||
|
||||
token, err := jwt.Parse(uToken, h.ProvideKey)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
payload, ok := token.Claims.(jwt.MapClaims)
|
||||
|
||||
if !ok {
|
||||
err := fmt.Errorf("invalid payload")
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
username, ok := payload["username"].(string)
|
||||
|
||||
if !ok {
|
||||
err := fmt.Errorf("invalid payload")
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, ok = payload["exp"]; ok {
|
||||
// allow static token has expiration time
|
||||
return token, nil
|
||||
}
|
||||
|
||||
tokenKey := fmt.Sprintf("kubesphere:users:%s:token:%s", username, uToken)
|
||||
|
||||
exist, err := h.Rule.RedisClient.Exists(tokenKey)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exist {
|
||||
// reset expiration time if token exist
|
||||
h.Rule.RedisClient.Expire(tokenKey, h.Rule.TokenIdleTimeout)
|
||||
return token, nil
|
||||
} else {
|
||||
return nil, errors.New("illegal token")
|
||||
}
|
||||
}
|
||||
|
||||
func (h Auth) HandleUnauthorized(w http.ResponseWriter, err error) int {
|
||||
message := fmt.Sprintf("Unauthorized,%v", err)
|
||||
w.Header().Add("WWW-Authenticate", message)
|
||||
log.Println(message)
|
||||
return http.StatusUnauthorized
|
||||
}
|
||||
|
||||
func (h Auth) ExtractToken(r *http.Request) (string, error) {
|
||||
|
||||
jwtHeader := strings.Split(r.Header.Get("Authorization"), " ")
|
||||
|
||||
if jwtHeader[0] == "Bearer" && len(jwtHeader) == 2 {
|
||||
return jwtHeader[1], nil
|
||||
}
|
||||
|
||||
jwtCookie, err := r.Cookie("token")
|
||||
|
||||
if err == nil {
|
||||
return jwtCookie.Value, nil
|
||||
}
|
||||
|
||||
jwtQuery := r.URL.Query().Get("token")
|
||||
|
||||
if jwtQuery != "" {
|
||||
return jwtQuery, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no token found")
|
||||
}
|
||||
|
||||
func (h Auth) ProvideKey(token *jwt.Token) (interface{}, error) {
|
||||
if _, ok := token.Method.(*jwt.SigningMethodHMAC); ok {
|
||||
return h.Rule.Secret, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("expect token signed with HMAC but got %v", token.Header["alg"])
|
||||
}
|
||||
}
|
||||
@@ -1,155 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
package authenticate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"kubesphere.io/kubesphere/pkg/apigateway/caddy-plugin/internal"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/cache"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
"time"
|
||||
|
||||
"github.com/mholt/caddy"
|
||||
"github.com/mholt/caddy/caddyhttp/httpserver"
|
||||
)
|
||||
|
||||
// Setup is the caddy plugin entry point for the authenticate directive.
// It parses the directive configuration, connects the Redis session store
// on server startup, and installs the Auth middleware into the HTTP
// handler chain.
func Setup(c *caddy.Controller) error {

	rule, err := parse(c)

	if err != nil {
		return err
	}

	// Closed on shutdown so the Redis client can release background resources.
	stopCh := make(chan struct{})

	c.OnStartup(func() error {
		rule.RedisClient, err = cache.NewRedisClient(rule.RedisOptions, stopCh)
		// ensure redis is connected when startup
		if err != nil {
			return err
		}
		fmt.Println("Authenticate middleware is initiated")
		return nil
	})

	c.OnShutdown(func() error {
		close(stopCh)
		return nil
	})

	httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
		return &Auth{Next: next, Rule: rule}
	})

	return nil
}
|
||||
|
||||
// parse reads the authenticate directive block from the Caddyfile and
// builds the middleware Rule. Supported block keys:
//
//	path <p>                  base path to protect (single argument)
//	token-idle-timeout <dur>  sliding session expiration, Go duration syntax
//	redis-url <host>          session store address (required)
//	secret <s>                HMAC key for verifying token signatures
//	except <method> <paths…>  method plus one or more paths to exclude
//
// Each single-argument key rejects trailing arguments via the extra
// c.NextArg() check. Returns a parse error for unknown forms or when
// redis-url is missing.
func parse(c *caddy.Controller) (*Rule, error) {

	rule := &Rule{}
	rule.ExclusionRules = make([]internal.ExclusionRule, 0)
	if c.Next() {
		args := c.RemainingArgs()
		switch len(args) {
		case 0:
			// No inline arguments: the configuration is in a block.
			for c.NextBlock() {
				switch c.Val() {
				case "path":
					if !c.NextArg() {
						return nil, c.ArgErr()
					}

					rule.Path = c.Val()

					// Reject trailing arguments after the single value.
					if c.NextArg() {
						return nil, c.ArgErr()
					}
				case "token-idle-timeout":
					if !c.NextArg() {
						return nil, c.ArgErr()
					}

					if timeout, err := time.ParseDuration(c.Val()); err != nil {
						return nil, c.ArgErr()
					} else {
						rule.TokenIdleTimeout = timeout
					}

					if c.NextArg() {
						return nil, c.ArgErr()
					}
				case "redis-url":
					if !c.NextArg() {
						return nil, c.ArgErr()
					}

					options := &cache.Options{Host: c.Val()}

					// Validate returns a slice of errors; any entry makes
					// the URL invalid.
					if err := options.Validate(); len(err) > 0 {
						return nil, c.ArgErr()
					} else {
						rule.RedisOptions = options
					}

					if c.NextArg() {
						return nil, c.ArgErr()
					}
				case "secret":
					if !c.NextArg() {
						return nil, c.ArgErr()
					}

					rule.Secret = []byte(c.Val())

					if c.NextArg() {
						return nil, c.ArgErr()
					}
				case "except":

					if !c.NextArg() {
						return nil, c.ArgErr()
					}

					// First argument is the HTTP method (or the wildcard
					// accepted by internal.HttpMethods).
					method := c.Val()

					if !sliceutil.HasString(internal.HttpMethods, method) {
						return nil, c.ArgErr()
					}

					// Remaining arguments are the excluded paths.
					for c.NextArg() {
						path := c.Val()
						rule.ExclusionRules = append(rule.ExclusionRules, internal.ExclusionRule{Method: method, Path: path})
					}
				}
			}
		default:
			return nil, c.ArgErr()
		}
	}

	// Only one authenticate directive instance is allowed.
	if c.Next() {
		return nil, c.ArgErr()
	}

	if rule.RedisOptions == nil {
		return nil, c.Err("redis-url must be specified")
	}

	return rule, nil
}
|
||||
@@ -1,309 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
package authentication
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/client-go/informers"
|
||||
"kubesphere.io/kubesphere/pkg/apigateway/caddy-plugin/internal"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/mholt/caddy/caddyhttp/httpserver"
|
||||
"k8s.io/api/rbac/v1"
|
||||
k8serr "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
)
|
||||
|
||||
// Authentication is a caddy HTTP middleware that authorizes already
// authenticated requests against Kubernetes RBAC roles and cluster roles.
type Authentication struct {
	Rule            *Rule                           // configuration parsed from the Caddyfile
	Next            httpserver.Handler              // downstream handler invoked when authorization passes
	informerFactory informers.SharedInformerFactory // shared caches backing the RBAC listers
}

// Rule holds the configuration of the authentication middleware.
type Rule struct {
	Path              string                   // base path protected by this middleware
	KubernetesOptions *k8s.KubernetesOptions   // cluster connection settings
	ExclusionRules    []internal.ExclusionRule // path/method pairs that skip authorization
}
|
||||
|
||||
// ServeHTTP implements httpserver.Handler. For requests under the
// protected path (minus exclusions) it resolves the authorizer attributes
// placed in the context by the authenticate middleware and checks them
// against RBAC; unauthorized requests get a 403 via handleForbidden.
// Requests without attributes in the context pass through unchecked, on
// the assumption that they never went through authentication.
func (c *Authentication) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {

	if httpserver.Path(r.URL.Path).Matches(c.Rule.Path) {

		for _, rule := range c.Rule.ExclusionRules {
			if httpserver.Path(r.URL.Path).Matches(rule.Path) && (rule.Method == internal.AllMethod || r.Method == rule.Method) {
				return c.Next.ServeHTTP(w, r)
			}
		}

		attrs, err := getAuthorizerAttributes(r.Context())

		// without authenticate, no requestInfo found in the context
		if err != nil {
			return c.Next.ServeHTTP(w, r)
		}

		permitted, err := c.permissionValidate(attrs)

		if err != nil {
			return http.StatusInternalServerError, err
		}

		if !permitted {
			err = k8serr.NewForbidden(schema.GroupResource{Group: attrs.GetAPIGroup(), Resource: attrs.GetResource()}, attrs.GetName(), fmt.Errorf("permission undefined"))
			return handleForbidden(w, err), nil
		}
	}

	return c.Next.ServeHTTP(w, r)

}
|
||||
|
||||
func handleForbidden(w http.ResponseWriter, err error) int {
|
||||
message := fmt.Sprintf("Forbidden,%s", err.Error())
|
||||
w.Header().Add("WWW-Authenticate", message)
|
||||
log.Println(message)
|
||||
return http.StatusForbidden
|
||||
}
|
||||
|
||||
func (c *Authentication) permissionValidate(attrs authorizer.Attributes) (bool, error) {
|
||||
|
||||
if attrs.GetResource() == "users" && attrs.GetUser().GetName() == attrs.GetName() {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
permitted, err := c.clusterRoleValidate(attrs)
|
||||
|
||||
if err != nil {
|
||||
log.Println("lister error", err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
if permitted {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if attrs.GetNamespace() != "" {
|
||||
permitted, err = c.roleValidate(attrs)
|
||||
|
||||
if err != nil {
|
||||
log.Println("lister error", err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
if permitted {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (c *Authentication) roleValidate(attrs authorizer.Attributes) (bool, error) {
|
||||
roleBindingLister := c.informerFactory.Rbac().V1().RoleBindings().Lister()
|
||||
roleLister := c.informerFactory.Rbac().V1().Roles().Lister()
|
||||
roleBindings, err := roleBindingLister.RoleBindings(attrs.GetNamespace()).List(labels.Everything())
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
fullSource := attrs.GetResource()
|
||||
|
||||
if attrs.GetSubresource() != "" {
|
||||
fullSource = fullSource + "/" + attrs.GetSubresource()
|
||||
}
|
||||
|
||||
for _, roleBinding := range roleBindings {
|
||||
if iam.ContainsUser(roleBinding.Subjects, attrs.GetUser().GetName()) {
|
||||
role, err := roleLister.Roles(attrs.GetNamespace()).Get(roleBinding.RoleRef.Name)
|
||||
|
||||
if err != nil {
|
||||
if k8serr.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, rule := range role.Rules {
|
||||
if ruleMatchesRequest(rule, attrs.GetAPIGroup(), "", attrs.GetResource(), attrs.GetSubresource(), attrs.GetName(), attrs.GetVerb()) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (c *Authentication) clusterRoleValidate(attrs authorizer.Attributes) (bool, error) {
|
||||
clusterRoleBindingLister := c.informerFactory.Rbac().V1().ClusterRoleBindings().Lister()
|
||||
clusterRoleBindings, err := clusterRoleBindingLister.List(labels.Everything())
|
||||
clusterRoleLister := c.informerFactory.Rbac().V1().ClusterRoles().Lister()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, clusterRoleBinding := range clusterRoleBindings {
|
||||
|
||||
if iam.ContainsUser(clusterRoleBinding.Subjects, attrs.GetUser().GetName()) {
|
||||
clusterRole, err := clusterRoleLister.Get(clusterRoleBinding.RoleRef.Name)
|
||||
|
||||
if err != nil {
|
||||
if k8serr.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, rule := range clusterRole.Rules {
|
||||
if attrs.IsResourceRequest() {
|
||||
if ruleMatchesRequest(rule, attrs.GetAPIGroup(), "", attrs.GetResource(), attrs.GetSubresource(), attrs.GetName(), attrs.GetVerb()) {
|
||||
return true, nil
|
||||
}
|
||||
} else {
|
||||
if ruleMatchesRequest(rule, "", attrs.GetPath(), "", "", "", attrs.GetVerb()) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func ruleMatchesResources(rule v1.PolicyRule, apiGroup string, resource string, subresource string, resourceName string) bool {
|
||||
|
||||
if resource == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
if !sliceutil.HasString(rule.APIGroups, apiGroup) && !sliceutil.HasString(rule.APIGroups, v1.ResourceAll) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(rule.ResourceNames) > 0 && !sliceutil.HasString(rule.ResourceNames, resourceName) {
|
||||
return false
|
||||
}
|
||||
|
||||
combinedResource := resource
|
||||
|
||||
if subresource != "" {
|
||||
combinedResource = combinedResource + "/" + subresource
|
||||
}
|
||||
|
||||
for _, res := range rule.Resources {
|
||||
|
||||
// match "*"
|
||||
if res == v1.ResourceAll || res == combinedResource {
|
||||
return true
|
||||
}
|
||||
|
||||
// match "*/subresource"
|
||||
if len(subresource) > 0 && strings.HasPrefix(res, "*/") && subresource == strings.TrimLeft(res, "*/") {
|
||||
return true
|
||||
}
|
||||
// match "resource/*"
|
||||
if strings.HasSuffix(res, "/*") && resource == strings.TrimRight(res, "/*") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func ruleMatchesRequest(rule v1.PolicyRule, apiGroup string, nonResourceURL string, resource string, subresource string, resourceName string, verb string) bool {
|
||||
|
||||
if !sliceutil.HasString(rule.Verbs, verb) && !sliceutil.HasString(rule.Verbs, v1.VerbAll) {
|
||||
return false
|
||||
}
|
||||
|
||||
if nonResourceURL == "" {
|
||||
return ruleMatchesResources(rule, apiGroup, resource, subresource, resourceName)
|
||||
} else {
|
||||
return ruleMatchesNonResource(rule, nonResourceURL)
|
||||
}
|
||||
}
|
||||
|
||||
func ruleMatchesNonResource(rule v1.PolicyRule, nonResourceURL string) bool {
|
||||
|
||||
if nonResourceURL == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, spec := range rule.NonResourceURLs {
|
||||
if pathMatches(nonResourceURL, spec) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// pathMatches reports whether path is covered by spec. A spec of "*"
// matches everything; a spec ending in "*" matches any path sharing the
// prefix before the trailing asterisks; otherwise an exact match is
// required.
func pathMatches(path, spec string) bool {
	switch {
	case spec == "*", spec == path:
		return true
	case strings.HasSuffix(spec, "*"):
		return strings.HasPrefix(path, strings.TrimRight(spec, "*"))
	default:
		return false
	}
}
|
||||
|
||||
func getAuthorizerAttributes(ctx context.Context) (authorizer.Attributes, error) {
|
||||
attribs := authorizer.AttributesRecord{}
|
||||
|
||||
user, ok := request.UserFrom(ctx)
|
||||
if ok {
|
||||
attribs.User = user
|
||||
}
|
||||
|
||||
requestInfo, found := request.RequestInfoFrom(ctx)
|
||||
if !found {
|
||||
return nil, errors.New("no RequestInfo found in the context")
|
||||
}
|
||||
|
||||
// Start with common attributes that apply to resource and non-resource requests
|
||||
attribs.ResourceRequest = requestInfo.IsResourceRequest
|
||||
attribs.Path = requestInfo.Path
|
||||
attribs.Verb = requestInfo.Verb
|
||||
|
||||
attribs.APIGroup = requestInfo.APIGroup
|
||||
attribs.APIVersion = requestInfo.APIVersion
|
||||
attribs.Resource = requestInfo.Resource
|
||||
attribs.Subresource = requestInfo.Subresource
|
||||
attribs.Namespace = requestInfo.Namespace
|
||||
attribs.Name = requestInfo.Name
|
||||
|
||||
return &attribs, nil
|
||||
}
|
||||
@@ -1,128 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
package authentication
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mholt/caddy"
|
||||
"github.com/mholt/caddy/caddyhttp/httpserver"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apigateway/caddy-plugin/internal"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
)
|
||||
|
||||
// Setup is called by Caddy to parse the config block
|
||||
func Setup(c *caddy.Controller) error {
|
||||
|
||||
rule, err := parse(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rule.KubernetesOptions == nil && rule.KubernetesOptions.KubeConfig == "" {
|
||||
klog.Warning("no kubeconfig provided, will use in cluster config, this may not work")
|
||||
}
|
||||
|
||||
kubeClient, err := k8s.NewKubernetesClient(rule.KubernetesOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
informerFactory := informers.NewInformerFactories(kubeClient.Kubernetes(), nil, nil, nil)
|
||||
|
||||
stopChan := make(chan struct{}, 0)
|
||||
c.OnStartup(func() error {
|
||||
informerFactory.KubernetesSharedInformerFactory().Rbac().V1().Roles().Lister()
|
||||
informerFactory.KubernetesSharedInformerFactory().Rbac().V1().RoleBindings().Lister()
|
||||
informerFactory.KubernetesSharedInformerFactory().Rbac().V1().ClusterRoles().Lister()
|
||||
informerFactory.KubernetesSharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister()
|
||||
informerFactory.KubernetesSharedInformerFactory().Start(stopChan)
|
||||
informerFactory.KubernetesSharedInformerFactory().WaitForCacheSync(stopChan)
|
||||
fmt.Println("Authentication middleware is initiated")
|
||||
return nil
|
||||
})
|
||||
|
||||
c.OnShutdown(func() error {
|
||||
close(stopChan)
|
||||
return nil
|
||||
})
|
||||
|
||||
httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
|
||||
return &Authentication{Next: next, Rule: rule, informerFactory: informerFactory.KubernetesSharedInformerFactory()}
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// parse reads the "authentication" directive from the Caddyfile.
// Supported forms:
//
//	authentication /path
//	authentication {
//	    path /some/path
//	    except METHOD /p1 /p2 ...
//	}
//
// NOTE(review): rule.KubernetesOptions is never assigned here — confirm
// it is populated elsewhere before Setup uses it.
func parse(c *caddy.Controller) (*Rule, error) {

	rule := &Rule{}
	rule.ExclusionRules = make([]internal.ExclusionRule, 0)
	if c.Next() {
		args := c.RemainingArgs()
		switch len(args) {
		case 0:
			// Block form: nested "path" and "except" sub-directives.
			for c.NextBlock() {
				switch c.Val() {
				case "path":
					if !c.NextArg() {
						return rule, c.ArgErr()
					}

					rule.Path = c.Val()

					// "path" takes exactly one argument.
					if c.NextArg() {
						return rule, c.ArgErr()
					}

					break
				case "except":
					if !c.NextArg() {
						return nil, c.ArgErr()
					}

					// First argument is the HTTP method (or "*").
					method := c.Val()

					if !sliceutil.HasString(internal.HttpMethods, method) {
						return nil, c.ArgErr()
					}

					// Remaining arguments are excluded paths for that method.
					for c.NextArg() {
						path := c.Val()
						rule.ExclusionRules = append(rule.ExclusionRules, internal.ExclusionRule{Method: method, Path: path})
					}
					break
				}
			}
		case 1:
			// Shorthand form: a single path argument, no block allowed.
			rule.Path = args[0]
			if c.NextBlock() {
				return rule, c.ArgErr()
			}
		default:
			return rule, c.ArgErr()
		}
	}

	// Only one "authentication" directive is allowed.
	if c.Next() {
		return rule, c.ArgErr()
	}

	return rule, nil
}
|
||||
@@ -1,32 +0,0 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2019 The KubeSphere Authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
* /
|
||||
*/
|
||||
|
||||
// Package internal holds shared types for the caddy-plugin middleware.
package internal

import "net/http"

// AllMethod is the wildcard that matches any HTTP method in an
// exclusion rule.
const AllMethod = "*"

// HttpMethods lists the methods accepted by the "except" directive,
// including the wildcard. Note: http.MethodHead and http.MethodTrace
// are not in this list.
var HttpMethods = []string{AllMethod, http.MethodPost, http.MethodDelete,
	http.MethodPatch, http.MethodPut, http.MethodGet, http.MethodOptions, http.MethodConnect}

// Path exclusion rule
// ExclusionRule describes a path/method pair that bypasses the
// authentication middleware.
type ExclusionRule struct {
	// Method is an HTTP method name or AllMethod.
	Method string
	// Path is the URL path the exclusion applies to.
	Path string
}
|
||||
@@ -1,93 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
package authenticate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/mholt/caddy"
|
||||
"github.com/mholt/caddy/caddyhttp/httpserver"
|
||||
)
|
||||
|
||||
func Setup(c *caddy.Controller) error {
|
||||
|
||||
handler, err := parse(c)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.OnStartup(func() error {
|
||||
fmt.Println("Swagger middleware is initiated")
|
||||
return nil
|
||||
})
|
||||
|
||||
httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
|
||||
return &Swagger{Next: next, Handler: handler}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
// parse reads the swagger directive configuration. Defaults: URL prefix
// "/swagger-ui", assets served from "/var/static/swagger-ui". Supported
// block options are "url" and "filePath", each taking one argument; no
// inline arguments are accepted.
func parse(c *caddy.Controller) (Handler, error) {

	handler := Handler{URL: "/swagger-ui", FilePath: "/var/static/swagger-ui"}

	if c.Next() {
		args := c.RemainingArgs()
		switch len(args) {
		case 0:
			for c.NextBlock() {
				switch c.Val() {
				case "url":
					if !c.NextArg() {
						return handler, c.ArgErr()
					}

					handler.URL = c.Val()

					// "url" takes exactly one argument.
					if c.NextArg() {
						return handler, c.ArgErr()
					}
				case "filePath":
					if !c.NextArg() {
						return handler, c.ArgErr()
					}

					handler.FilePath = c.Val()

					// "filePath" takes exactly one argument.
					if c.NextArg() {
						return handler, c.ArgErr()
					}
				default:
					return handler, c.ArgErr()
				}
			}
		default:
			return handler, c.ArgErr()
		}
	}

	// Only one swagger directive is allowed.
	if c.Next() {
		return handler, c.ArgErr()
	}

	// Serve files from FilePath with the URL prefix stripped.
	handler.Handler = http.StripPrefix(handler.URL, http.FileServer(http.Dir(handler.FilePath)))

	return handler, nil
}
|
||||
@@ -1,45 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
package authenticate
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/mholt/caddy/caddyhttp/httpserver"
|
||||
)
|
||||
|
||||
// Swagger is a caddy middleware that serves static swagger-ui assets.
type Swagger struct {
	// Handler holds the configured URL prefix and file server.
	Handler Handler
	// Next is the downstream middleware handler.
	Next httpserver.Handler
}

// Handler bundles the swagger-ui location with the http.Handler that
// serves it.
type Handler struct {
	// URL is the path prefix under which the UI is exposed.
	URL string
	// FilePath is the directory containing the static assets.
	FilePath string
	// Handler serves FilePath with URL stripped (assigned in parse).
	Handler http.Handler
}
|
||||
|
||||
func (h Swagger) ServeHTTP(resp http.ResponseWriter, req *http.Request) (int, error) {
|
||||
|
||||
if httpserver.Path(req.URL.Path).Matches(h.Handler.URL) {
|
||||
h.Handler.Handler.ServeHTTP(resp, req)
|
||||
return http.StatusOK, nil
|
||||
}
|
||||
|
||||
return h.Next.ServeHTTP(resp, req)
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
package apigateway
|
||||
|
||||
import (
|
||||
"github.com/mholt/caddy"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apigateway/caddy-plugin/authenticate"
|
||||
"kubesphere.io/kubesphere/pkg/apigateway/caddy-plugin/authentication"
|
||||
swagger "kubesphere.io/kubesphere/pkg/apigateway/caddy-plugin/swagger"
|
||||
)
|
||||
|
||||
func RegisterPlugins() {
|
||||
caddy.RegisterPlugin("swagger", caddy.Plugin{
|
||||
ServerType: "http",
|
||||
Action: swagger.Setup,
|
||||
})
|
||||
|
||||
caddy.RegisterPlugin("authenticate", caddy.Plugin{
|
||||
ServerType: "http",
|
||||
Action: authenticate.Setup,
|
||||
})
|
||||
|
||||
caddy.RegisterPlugin("authentication", caddy.Plugin{
|
||||
ServerType: "http",
|
||||
Action: authentication.Setup,
|
||||
})
|
||||
}
|
||||
@@ -33,6 +33,7 @@ import (
|
||||
servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/servicemesh/metrics/v1alpha2"
|
||||
tenantv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/tenant/v1alpha2"
|
||||
terminalv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/terminal/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam/am"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/cache"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
|
||||
@@ -103,8 +104,6 @@ type APIServer struct {
|
||||
|
||||
//
|
||||
LdapClient ldap.Interface
|
||||
|
||||
//
|
||||
}
|
||||
|
||||
func (s *APIServer) PrepareRun() error {
|
||||
@@ -188,7 +187,7 @@ func (s *APIServer) buildHandlerChain() {
|
||||
|
||||
excludedPaths := []string{"/oauth/authorize", "/oauth/token"}
|
||||
pathAuthorizer, _ := path.NewAuthorizer(excludedPaths)
|
||||
authorizer := unionauthorizer.New(pathAuthorizer, authorizerfactory.NewOPAAuthorizer())
|
||||
authorizer := unionauthorizer.New(pathAuthorizer, authorizerfactory.NewOPAAuthorizer(am.NewAMOperator(s.KubernetesClient.Kubernetes(), s.InformerFactory.KubernetesSharedInformerFactory())))
|
||||
handler = filters.WithAuthorization(handler, authorizer)
|
||||
handler = filters.WithMultipleClusterDispatcher(handler, dispatch.DefaultClusterDispatch)
|
||||
handler = filters.WithKubeAPIServer(handler, s.KubernetesClient.Config(), &errorResponder{})
|
||||
|
||||
@@ -18,15 +18,111 @@
|
||||
|
||||
package authorizerfactory
|
||||
|
||||
import "kubesphere.io/kubesphere/pkg/apiserver/authorization/authorizer"
|
||||
import (
|
||||
"context"
|
||||
"github.com/open-policy-agent/opa/rego"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authorization/authorizer"
|
||||
am2 "kubesphere.io/kubesphere/pkg/models/iam/am"
|
||||
)
|
||||
|
||||
type opaAuthorizer struct{}
|
||||
|
||||
func (opaAuthorizer) Authorize(a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {
|
||||
// TODO implement.
|
||||
return authorizer.DecisionAllow, "", nil
|
||||
type opaAuthorizer struct {
|
||||
am am2.AccessManagementInterface
|
||||
}
|
||||
|
||||
func NewOPAAuthorizer() *opaAuthorizer {
|
||||
return new(opaAuthorizer)
|
||||
func (o *opaAuthorizer) Authorize(a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {
|
||||
|
||||
platformRole, err := o.am.GetPlatformRole(a.GetUser().GetName())
|
||||
if err != nil {
|
||||
return authorizer.DecisionDeny, "", err
|
||||
}
|
||||
|
||||
// check platform role policy rules
|
||||
if a, r, e := o.roleAuthorize(platformRole, a); a == authorizer.DecisionAllow {
|
||||
return a, r, e
|
||||
}
|
||||
|
||||
// it's not in cluster resource, permission denied
|
||||
// TODO declare implicit cluster info in request Info
|
||||
if a.GetCluster() == "" {
|
||||
return authorizer.DecisionDeny, "permission undefined", nil
|
||||
}
|
||||
|
||||
clusterRole, err := o.am.GetClusterRole(a.GetCluster(), a.GetUser().GetName())
|
||||
if err != nil {
|
||||
return authorizer.DecisionDeny, "", err
|
||||
}
|
||||
|
||||
// check cluster role policy rules
|
||||
if a, r, e := o.roleAuthorize(clusterRole, a); a == authorizer.DecisionAllow {
|
||||
return a, r, e
|
||||
}
|
||||
|
||||
// it's not in cluster resource, permission denied
|
||||
if a.GetWorkspace() == "" && a.GetNamespace() == "" && a.GetDevopsProject() == "" {
|
||||
return authorizer.DecisionDeny, "permission undefined", nil
|
||||
}
|
||||
|
||||
workspaceRole, err := o.am.GetWorkspaceRole(a.GetWorkspace(), a.GetUser().GetName())
|
||||
if err != nil {
|
||||
return authorizer.DecisionDeny, "", err
|
||||
}
|
||||
|
||||
// check workspace role policy rules
|
||||
if a, r, e := o.roleAuthorize(workspaceRole, a); a == authorizer.DecisionAllow {
|
||||
return a, r, e
|
||||
}
|
||||
|
||||
// it's not in workspace resource, permission denied
|
||||
if a.GetNamespace() == "" && a.GetDevopsProject() == "" {
|
||||
return authorizer.DecisionDeny, "permission undefined", nil
|
||||
}
|
||||
|
||||
if a.GetNamespace() != "" {
|
||||
namespaceRole, err := o.am.GetNamespaceRole(a.GetNamespace(), a.GetUser().GetName())
|
||||
if err != nil {
|
||||
return authorizer.DecisionDeny, "", err
|
||||
}
|
||||
// check namespace role policy rules
|
||||
if a, r, e := o.roleAuthorize(namespaceRole, a); a == authorizer.DecisionAllow {
|
||||
return a, r, e
|
||||
}
|
||||
}
|
||||
|
||||
if a.GetDevopsProject() != "" {
|
||||
devOpsRole, err := o.am.GetDevOpsRole(a.GetNamespace(), a.GetUser().GetName())
|
||||
if err != nil {
|
||||
return authorizer.DecisionDeny, "", err
|
||||
}
|
||||
// check devops role policy rules
|
||||
if a, r, e := o.roleAuthorize(devOpsRole, a); a == authorizer.DecisionAllow {
|
||||
return a, r, e
|
||||
}
|
||||
}
|
||||
|
||||
return authorizer.DecisionDeny, "", nil
|
||||
}
|
||||
|
||||
// roleAuthorize compiles the role's rego policy and evaluates it with
// the request attributes as input. Allow requires `data.authz.allow`
// to evaluate to true; undefined results or evaluation errors deny.
// NOTE(review): the query path assumes each role's rego module declares
// `package authz` — a module with any other package name yields no
// results and always denies; confirm against stored role policies.
func (o *opaAuthorizer) roleAuthorize(role am2.Role, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {

	// The policy is recompiled on every call; prepared queries could be
	// cached per role if this path turns out to be hot.
	query, err := rego.New(rego.Query("data.authz.allow"), rego.Module("authz.rego", role.GetRego())).PrepareForEval(context.Background())

	if err != nil {
		return authorizer.DecisionDeny, "", err
	}

	results, err := query.Eval(context.Background(), rego.EvalInput(a))

	if err != nil {
		return authorizer.DecisionDeny, "", err
	}

	if len(results) > 0 && results[0].Expressions[0].Value == true {
		return authorizer.DecisionAllow, "", nil
	}

	return authorizer.DecisionDeny, "permission undefined", nil
}
|
||||
|
||||
func NewOPAAuthorizer(am am2.AccessManagementInterface) *opaAuthorizer {
|
||||
return &opaAuthorizer{am: am}
|
||||
}
|
||||
|
||||
84
pkg/apiserver/authorization/authorizerfactory/opa_test.go
Normal file
84
pkg/apiserver/authorization/authorizerfactory/opa_test.go
Normal file
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 The KubeSphere Authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
* /
|
||||
*/
|
||||
|
||||
package authorizerfactory
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/open-policy-agent/opa/rego"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authorization/authorizer"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPlatformRole(t *testing.T) {
|
||||
|
||||
module := `package platform.authz
|
||||
default allow = false
|
||||
|
||||
allow {
|
||||
input.User.name == "admin"
|
||||
}
|
||||
|
||||
allow {
|
||||
is_admin
|
||||
}
|
||||
|
||||
is_admin {
|
||||
input.User.Groups[_] == "admin"
|
||||
}
|
||||
`
|
||||
query, err := rego.New(rego.Query("data.authz.allow"), rego.Module("authz.rego", module)).PrepareForEval(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
input := authorizer.AttributesRecord{
|
||||
User: &user.DefaultInfo{
|
||||
Name: "admin",
|
||||
UID: "0",
|
||||
Groups: []string{"admin"},
|
||||
Extra: nil,
|
||||
},
|
||||
Verb: "list",
|
||||
Cluster: "",
|
||||
Workspace: "",
|
||||
Namespace: "",
|
||||
DevopsProject: "",
|
||||
APIGroup: "",
|
||||
APIVersion: "v1",
|
||||
Resource: "nodes",
|
||||
Subresource: "",
|
||||
Name: "",
|
||||
KubernetesRequest: true,
|
||||
ResourceRequest: true,
|
||||
Path: "/api/v1/nodes",
|
||||
}
|
||||
|
||||
results, err := query.Eval(context.Background(), rego.EvalInput(input))
|
||||
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
|
||||
if len(results) > 0 && results[0].Expressions[0].Value == true {
|
||||
t.Log("allowed")
|
||||
} else {
|
||||
t.Log("deny")
|
||||
}
|
||||
}
|
||||
@@ -17,9 +17,8 @@ limitations under the License.
|
||||
package path
|
||||
|
||||
import (
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authorization/authorizer"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
)
|
||||
|
||||
func TestNewAuthorizer(t *testing.T) {
|
||||
|
||||
@@ -32,11 +32,9 @@ import (
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
"openpitrix.io/openpitrix/pkg/pb"
|
||||
"reflect"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
@@ -183,14 +181,6 @@ func (r *ReconcileNamespace) Reconcile(request reconcile.Request) (reconcile.Res
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err = r.checkAndCreateRoles(instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err = r.checkAndCreateRoleBindings(instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.checkAndCreateRuntime(instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -210,152 +200,6 @@ func (r *ReconcileNamespace) isControlledByWorkspace(namespace *corev1.Namespace
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Create default roles
|
||||
func (r *ReconcileNamespace) checkAndCreateRoles(namespace *corev1.Namespace) error {
|
||||
for _, role := range defaultRoles {
|
||||
found := &rbac.Role{}
|
||||
err := r.Get(context.TODO(), types.NamespacedName{Namespace: namespace.Name, Name: role.Name}, found)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
role := role.DeepCopy()
|
||||
role.Namespace = namespace.Name
|
||||
err = r.Create(context.TODO(), role)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !reflect.DeepEqual(found.Rules, role.Rules) {
|
||||
found.Rules = role.Rules
|
||||
if err := r.Update(context.TODO(), found); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkAndCreateRoleBindings ensures the namespace carries "admin" and
// "viewer" RoleBindings mirroring the owning workspace's cluster-level
// admin/viewer ClusterRoleBindings, with the namespace creator added to
// the admin binding. A binding whose RoleRef drifted is deleted and an
// error is returned so a later reconcile recreates it (RoleRef is
// immutable in Kubernetes RBAC).
func (r *ReconcileNamespace) checkAndCreateRoleBindings(namespace *corev1.Namespace) error {

	workspaceName := namespace.Labels[constants.WorkspaceLabelKey]
	creatorName := namespace.Annotations[constants.CreatorAnnotationKey]

	creator := rbac.Subject{APIGroup: "rbac.authorization.k8s.io", Kind: "User", Name: creatorName}

	workspaceAdminBinding := &rbac.ClusterRoleBinding{}

	// The workspace admin ClusterRoleBinding is the source of truth for
	// the namespace admin subjects.
	err := r.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("workspace:%s:admin", workspaceName)}, workspaceAdminBinding)

	if err != nil {
		return err
	}

	// Desired namespace-level admin binding.
	adminBinding := &rbac.RoleBinding{}
	adminBinding.Name = admin.Name
	adminBinding.Namespace = namespace.Name
	adminBinding.RoleRef = rbac.RoleRef{Name: admin.Name, APIGroup: "rbac.authorization.k8s.io", Kind: "Role"}
	adminBinding.Subjects = workspaceAdminBinding.Subjects

	// The recorded namespace creator (if any) is granted admin too.
	if creator.Name != "" {
		if adminBinding.Subjects == nil {
			adminBinding.Subjects = make([]rbac.Subject, 0)
		}
		if !iam.ContainsUser(adminBinding.Subjects, creatorName) {
			adminBinding.Subjects = append(adminBinding.Subjects, creator)
		}
	}

	found := &rbac.RoleBinding{}

	err = r.Get(context.TODO(), types.NamespacedName{Namespace: namespace.Name, Name: adminBinding.Name}, found)

	if errors.IsNotFound(err) {
		err = r.Create(context.TODO(), adminBinding)
		if err != nil {
			klog.Errorf("creating role binding namespace: %s,role binding: %s, error: %s", namespace.Name, adminBinding.Name, err)
			return err
		}
		found = adminBinding
	} else if err != nil {
		klog.Errorf("get role binding namespace: %s,role binding: %s, error: %s", namespace.Name, adminBinding.Name, err)
		return err
	}

	// RoleRef cannot be updated in place: delete the binding and return
	// an error so the next reconcile recreates it.
	if !reflect.DeepEqual(found.RoleRef, adminBinding.RoleRef) {
		err = r.Delete(context.TODO(), found)
		if err != nil {
			klog.Errorf("deleting role binding namespace: %s, role binding: %s, error: %s", namespace.Name, adminBinding.Name, err)
			return err
		}
		err = fmt.Errorf("conflict role binding %s.%s, waiting for recreate", namespace.Name, adminBinding.Name)
		klog.Errorf("conflict role binding namespace: %s, role binding: %s, error: %s", namespace.Name, adminBinding.Name, err)
		return err
	}

	// Converge subjects toward the desired set.
	if !reflect.DeepEqual(found.Subjects, adminBinding.Subjects) {
		found.Subjects = adminBinding.Subjects
		err = r.Update(context.TODO(), found)
		if err != nil {
			klog.Errorf("updating role binding namespace: %s, role binding: %s, error: %s", namespace.Name, adminBinding.Name, err)
			return err
		}
	}

	// Same procedure for the viewer binding, sourced from the workspace
	// viewer ClusterRoleBinding (no creator injection here).
	workspaceViewerBinding := &rbac.ClusterRoleBinding{}

	err = r.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("workspace:%s:viewer", workspaceName)}, workspaceViewerBinding)

	if err != nil {
		return err
	}

	viewerBinding := &rbac.RoleBinding{}
	viewerBinding.Name = viewer.Name
	viewerBinding.Namespace = namespace.Name
	viewerBinding.RoleRef = rbac.RoleRef{Name: viewer.Name, APIGroup: "rbac.authorization.k8s.io", Kind: "Role"}
	viewerBinding.Subjects = workspaceViewerBinding.Subjects

	// NOTE(review): `found` is reused from the admin phase; it is only
	// reassigned after a NotFound+Create below — confirm the stale
	// contents cannot leak into the comparisons on other error paths.
	err = r.Get(context.TODO(), types.NamespacedName{Namespace: namespace.Name, Name: viewerBinding.Name}, found)

	if errors.IsNotFound(err) {
		err = r.Create(context.TODO(), viewerBinding)
		if err != nil {
			klog.Errorf("creating role binding namespace: %s, role binding: %s, error: %s", namespace.Name, viewerBinding.Name, err)
			return err
		}
		found = viewerBinding
	} else if err != nil {
		return err
	}

	// As above: a drifted RoleRef forces delete-and-recreate.
	if !reflect.DeepEqual(found.RoleRef, viewerBinding.RoleRef) {
		err = r.Delete(context.TODO(), found)
		if err != nil {
			klog.Errorf("deleting conflict role binding namespace: %s, role binding: %s, %s", namespace.Name, viewerBinding.Name, err)
			return err
		}
		err = fmt.Errorf("conflict role binding %s.%s, waiting for recreate", namespace.Name, viewerBinding.Name)
		klog.Errorf("conflict role binding namespace: %s, role binding: %s, error: %s", namespace.Name, viewerBinding.Name, err)
		return err
	}

	if !reflect.DeepEqual(found.Subjects, viewerBinding.Subjects) {
		found.Subjects = viewerBinding.Subjects
		err = r.Update(context.TODO(), found)
		if err != nil {
			klog.Errorf("updating role binding namespace: %s, role binding: %s, error: %s", namespace.Name, viewerBinding.Name, err)
			return err
		}
	}

	return nil
}
|
||||
|
||||
// Create openpitrix runtime
|
||||
func (r *ReconcileNamespace) checkAndCreateRuntime(namespace *corev1.Namespace) error {
|
||||
|
||||
|
||||
@@ -2,19 +2,14 @@ package v1alpha2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/emicklei/go-restful"
|
||||
"github.com/go-ldap/ldap"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
iamv1alpha2 "kubesphere.io/kubesphere/pkg/api/iam/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam/policy"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam/am"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam/im"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2"
|
||||
apierr "kubesphere.io/kubesphere/pkg/server/errors"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/cache"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
|
||||
@@ -30,14 +25,14 @@ const (
|
||||
)
|
||||
|
||||
type iamHandler struct {
|
||||
amOperator iam.AccessManagementInterface
|
||||
imOperator iam.IdentityManagementInterface
|
||||
amOperator am.AccessManagementInterface
|
||||
imOperator im.IdentityManagementInterface
|
||||
}
|
||||
|
||||
func newIAMHandler(k8sClient k8s.Client, factory informers.InformerFactory, ldapClient ldappool.Interface, cacheClient cache.Interface, options *iamapi.AuthenticationOptions) *iamHandler {
|
||||
return &iamHandler{
|
||||
amOperator: iam.NewAMOperator(k8sClient.Kubernetes(), factory.KubernetesSharedInformerFactory()),
|
||||
imOperator: iam.NewIMOperator(ldapClient, cacheClient, options),
|
||||
amOperator: am.NewAMOperator(k8sClient.Kubernetes(), factory.KubernetesSharedInformerFactory()),
|
||||
imOperator: im.NewIMOperator(ldapClient, cacheClient, options),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -96,7 +91,7 @@ func (h *iamHandler) Login(req *restful.Request, resp *restful.Response) {
|
||||
token, err := h.imOperator.Login(loginRequest.Username, loginRequest.Password, ip)
|
||||
|
||||
if err != nil {
|
||||
if err == iam.AuthRateLimitExceeded {
|
||||
if err == im.AuthRateLimitExceeded {
|
||||
klog.V(4).Infoln(err)
|
||||
resp.WriteHeaderAndEntity(http.StatusTooManyRequests, err)
|
||||
return
|
||||
@@ -110,151 +105,19 @@ func (h *iamHandler) Login(req *restful.Request, resp *restful.Response) {
|
||||
}
|
||||
|
||||
func (h *iamHandler) CreateUser(req *restful.Request, resp *restful.Response) {
|
||||
var createRequest iamv1alpha2.CreateUserRequest
|
||||
err := req.ReadEntity(&createRequest)
|
||||
if err != nil {
|
||||
klog.V(4).Infoln(err)
|
||||
api.HandleBadRequest(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := createRequest.Validate(); err != nil {
|
||||
klog.V(4).Infoln(err)
|
||||
api.HandleBadRequest(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
created, err := h.imOperator.CreateUser(createRequest.User)
|
||||
|
||||
if err != nil {
|
||||
if err == iam.UserAlreadyExists {
|
||||
klog.V(4).Infoln(err)
|
||||
resp.WriteHeaderAndEntity(http.StatusConflict, err)
|
||||
return
|
||||
}
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
err = h.amOperator.CreateClusterRoleBinding(created.Username, createRequest.ClusterRole)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp.WriteEntity(created)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (h *iamHandler) DeleteUser(req *restful.Request, resp *restful.Response) {
|
||||
username := req.PathParameter("user")
|
||||
operator := req.HeaderParameter(constants.UserNameHeader)
|
||||
|
||||
if operator == username {
|
||||
err := errors.New("cannot delete yourself")
|
||||
klog.V(4).Infoln(err)
|
||||
api.HandleForbidden(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
err := h.amOperator.UnBindAllRoles(username)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
err = h.imOperator.DeleteUser(username)
|
||||
|
||||
// TODO release user resources
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp.WriteEntity(apierr.None)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (h *iamHandler) ModifyUser(request *restful.Request, response *restful.Response) {
|
||||
|
||||
username := request.PathParameter("user")
|
||||
operator := request.HeaderParameter(constants.UserNameHeader)
|
||||
var modifyUserRequest iamv1alpha2.ModifyUserRequest
|
||||
|
||||
err := request.ReadEntity(&modifyUserRequest)
|
||||
|
||||
if err != nil {
|
||||
klog.V(4).Infoln(err)
|
||||
api.HandleBadRequest(response, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
if username != modifyUserRequest.Username {
|
||||
err = fmt.Errorf("the name of user (%s) does not match the name on the URL (%s)", modifyUserRequest.Username, username)
|
||||
klog.V(4).Infoln(err)
|
||||
api.HandleBadRequest(response, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = modifyUserRequest.Validate(); err != nil {
|
||||
klog.V(4).Infoln(err)
|
||||
api.HandleBadRequest(response, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
// change password by self
|
||||
if operator == modifyUserRequest.Username && modifyUserRequest.Password != "" {
|
||||
|
||||
}
|
||||
|
||||
result, err := h.imOperator.ModifyUser(modifyUserRequest.User)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(response, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO modify cluster role
|
||||
|
||||
response.WriteEntity(result)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (h *iamHandler) DescribeUser(req *restful.Request, resp *restful.Response) {
|
||||
username := req.PathParameter("user")
|
||||
|
||||
user, err := h.imOperator.DescribeUser(username)
|
||||
|
||||
if err != nil {
|
||||
if err == iam.UserNotExists {
|
||||
klog.V(4).Infoln(err)
|
||||
api.HandleNotFound(resp, nil, err)
|
||||
return
|
||||
}
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO append more user info
|
||||
clusterRole, err := h.amOperator.GetClusterRole(username)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
result := iamv1alpha2.UserDetail{
|
||||
User: user,
|
||||
ClusterRole: clusterRole.Name,
|
||||
}
|
||||
|
||||
resp.WriteEntity(result)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (h *iamHandler) ListUsers(req *restful.Request, resp *restful.Response) {
|
||||
@@ -282,202 +145,35 @@ func (h *iamHandler) ListUsers(req *restful.Request, resp *restful.Response) {
|
||||
}
|
||||
|
||||
func (h *iamHandler) ListUserRoles(req *restful.Request, resp *restful.Response) {
|
||||
|
||||
username := req.PathParameter("user")
|
||||
|
||||
roles, err := h.imOperator.GetUserRoles(username)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp.WriteEntity(roles)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (h *iamHandler) ListRoles(req *restful.Request, resp *restful.Response) {
|
||||
namespace := req.PathParameter("namespace")
|
||||
limit, offset := params.ParsePaging(req)
|
||||
orderBy := params.GetStringValueWithDefault(req, params.OrderByParam, v1alpha2.CreateTime)
|
||||
reverse := params.GetBoolValueWithDefault(req, params.ReverseParam, true)
|
||||
conditions, err := params.ParseConditions(req)
|
||||
|
||||
if err != nil {
|
||||
klog.V(4).Infoln(err)
|
||||
api.HandleBadRequest(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
result, err := h.amOperator.ListRoles(namespace, conditions, orderBy, reverse, limit, offset)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp.WriteAsJson(result)
|
||||
|
||||
panic("implement me")
|
||||
}
|
||||
func (h *iamHandler) ListClusterRoles(req *restful.Request, resp *restful.Response) {
|
||||
limit, offset := params.ParsePaging(req)
|
||||
orderBy := params.GetStringValueWithDefault(req, params.OrderByParam, v1alpha2.CreateTime)
|
||||
reverse := params.GetBoolValueWithDefault(req, params.ReverseParam, true)
|
||||
conditions, err := params.ParseConditions(req)
|
||||
|
||||
if err != nil {
|
||||
klog.V(4).Infoln(err)
|
||||
api.HandleBadRequest(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
result, err := h.amOperator.ListClusterRoles(conditions, orderBy, reverse, limit, offset)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp.WriteEntity(result)
|
||||
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (h *iamHandler) ListRoleUsers(req *restful.Request, resp *restful.Response) {
|
||||
role := req.PathParameter("role")
|
||||
namespace := req.PathParameter("namespace")
|
||||
|
||||
roleBindings, err := h.amOperator.ListRoleBindings(namespace, role)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
result := make([]*iamapi.User, 0)
|
||||
for _, roleBinding := range roleBindings {
|
||||
for _, subject := range roleBinding.Subjects {
|
||||
if subject.Kind == rbacv1.UserKind {
|
||||
user, err := h.imOperator.DescribeUser(subject.Name)
|
||||
// skip if user not exist
|
||||
if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
result = append(result, user)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resp.WriteEntity(result)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
// List users by namespace
|
||||
func (h *iamHandler) ListNamespaceUsers(req *restful.Request, resp *restful.Response) {
|
||||
|
||||
namespace := req.PathParameter("namespace")
|
||||
|
||||
roleBindings, err := h.amOperator.ListRoleBindings(namespace, "")
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
result := make([]*iamapi.User, 0)
|
||||
for _, roleBinding := range roleBindings {
|
||||
for _, subject := range roleBinding.Subjects {
|
||||
if subject.Kind == rbacv1.UserKind {
|
||||
user, err := h.imOperator.DescribeUser(subject.Name)
|
||||
// skip if user not exist
|
||||
if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
result = append(result, user)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resp.WriteEntity(result)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (h *iamHandler) ListClusterRoleUsers(req *restful.Request, resp *restful.Response) {
|
||||
clusterRole := req.PathParameter("clusterrole")
|
||||
clusterRoleBindings, err := h.amOperator.ListClusterRoleBindings(clusterRole)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
result := make([]*iamapi.User, 0)
|
||||
for _, roleBinding := range clusterRoleBindings {
|
||||
for _, subject := range roleBinding.Subjects {
|
||||
if subject.Kind == rbacv1.UserKind {
|
||||
user, err := h.imOperator.DescribeUser(subject.Name)
|
||||
// skip if user not exist
|
||||
if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
result = append(result, user)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resp.WriteEntity(result)
|
||||
}
|
||||
|
||||
func (h *iamHandler) RulesMapping(req *restful.Request, resp *restful.Response) {
|
||||
rules := policy.RoleRuleMapping
|
||||
resp.WriteEntity(rules)
|
||||
}
|
||||
|
||||
func (h *iamHandler) ClusterRulesMapping(req *restful.Request, resp *restful.Response) {
|
||||
rules := policy.ClusterRoleRuleMapping
|
||||
resp.WriteEntity(rules)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (h *iamHandler) ListClusterRoleRules(req *restful.Request, resp *restful.Response) {
|
||||
clusterRole := req.PathParameter("clusterrole")
|
||||
rules, err := h.amOperator.GetClusterRoleSimpleRules(clusterRole)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
resp.WriteEntity(rules)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (h *iamHandler) ListRoleRules(req *restful.Request, resp *restful.Response) {
|
||||
namespace := req.PathParameter("namespace")
|
||||
role := req.PathParameter("role")
|
||||
|
||||
rules, err := h.amOperator.GetRoleSimpleRules(namespace, role)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp.WriteEntity(rules)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (h *iamHandler) ListWorkspaceRoles(request *restful.Request, response *restful.Response) {
|
||||
|
||||
@@ -29,7 +29,6 @@ import (
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/models"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam/policy"
|
||||
"kubesphere.io/kubesphere/pkg/server/errors"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/cache"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
|
||||
@@ -124,29 +123,6 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, factory informer
|
||||
Param(ws.PathParameter("clusterrole", "cluster role name")).
|
||||
Returns(http.StatusOK, api.StatusOK, []iamv1alpha2.ListUserResponse{}).
|
||||
Metadata(restfulspec.KeyOpenAPITags, []string{constants.AccessManagementTag}))
|
||||
ws.Route(ws.GET("/clusterroles/{clusterrole}/rules").
|
||||
To(handler.ListClusterRoleRules).
|
||||
Doc("List all policy rules of the specified cluster role.").
|
||||
Param(ws.PathParameter("clusterrole", "cluster role name")).
|
||||
Returns(http.StatusOK, api.StatusOK, []policy.SimpleRule{}).
|
||||
Metadata(restfulspec.KeyOpenAPITags, []string{constants.AccessManagementTag}))
|
||||
ws.Route(ws.GET("/namespaces/{namespace}/roles/{role}/rules").
|
||||
To(handler.ListRoleRules).
|
||||
Doc("List all policy rules of the specified role in the given namespace.").
|
||||
Param(ws.PathParameter("namespace", "kubernetes namespace")).
|
||||
Param(ws.PathParameter("role", "role name")).
|
||||
Returns(http.StatusOK, api.StatusOK, []policy.SimpleRule{}).
|
||||
Metadata(restfulspec.KeyOpenAPITags, []string{constants.AccessManagementTag}))
|
||||
ws.Route(ws.GET("/rulesmapping/clusterroles").
|
||||
To(handler.ClusterRulesMapping).
|
||||
Doc("Get the mapping relationships between cluster roles and policy rules.").
|
||||
Returns(http.StatusOK, api.StatusOK, policy.ClusterRoleRuleMapping).
|
||||
Metadata(restfulspec.KeyOpenAPITags, []string{constants.AccessManagementTag}))
|
||||
ws.Route(ws.GET("/rulesmapping/roles").
|
||||
To(handler.RulesMapping).
|
||||
Doc("Get the mapping relationships between namespaced roles and policy rules.").
|
||||
Returns(http.StatusOK, api.StatusOK, policy.RoleRuleMapping).
|
||||
Metadata(restfulspec.KeyOpenAPITags, []string{constants.AccessManagementTag}))
|
||||
|
||||
ws.Route(ws.GET("/workspaces/{workspace}/roles").
|
||||
To(handler.ListWorkspaceRoles).
|
||||
|
||||
@@ -3,14 +3,12 @@ package v1alpha2
|
||||
import (
|
||||
"github.com/emicklei/go-restful"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
k8serr "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/util/net"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam"
|
||||
am2 "kubesphere.io/kubesphere/pkg/models/iam/am"
|
||||
"kubesphere.io/kubesphere/pkg/models/monitoring"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/models/tenant"
|
||||
@@ -18,39 +16,22 @@ import (
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/mysql"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type tenantHandler struct {
|
||||
tenant tenant.Interface
|
||||
am iam.AccessManagementInterface
|
||||
am am2.AccessManagementInterface
|
||||
}
|
||||
|
||||
func newTenantHandler(k8sClient k8s.Client, factory informers.InformerFactory, db *mysql.Database) *tenantHandler {
|
||||
|
||||
return &tenantHandler{
|
||||
tenant: tenant.New(k8sClient.Kubernetes(), factory.KubernetesSharedInformerFactory(), factory.KubeSphereSharedInformerFactory(), db),
|
||||
am: iam.NewAMOperator(k8sClient.Kubernetes(), factory.KubernetesSharedInformerFactory()),
|
||||
am: am2.NewAMOperator(k8sClient.Kubernetes(), factory.KubernetesSharedInformerFactory()),
|
||||
}
|
||||
}
|
||||
|
||||
func (h *tenantHandler) ListWorkspaceRules(req *restful.Request, resp *restful.Response) {
|
||||
workspace := req.PathParameter("workspace")
|
||||
username := req.HeaderParameter(constants.UserNameHeader)
|
||||
|
||||
rules, err := h.tenant.GetWorkspaceSimpleRules(workspace, username)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp.WriteEntity(rules)
|
||||
}
|
||||
|
||||
func (h *tenantHandler) ListWorkspaces(req *restful.Request, resp *restful.Response) {
|
||||
username := req.HeaderParameter(constants.UserNameHeader)
|
||||
orderBy := params.GetStringValueWithDefault(req, params.OrderByParam, v1alpha2.CreateTime)
|
||||
@@ -256,136 +237,3 @@ func (h *tenantHandler) DeleteDevopsProject(req *restful.Request, resp *restful.
|
||||
func (h *tenantHandler) CreateDevopsProject(req *restful.Request, resp *restful.Response) {
|
||||
|
||||
}
|
||||
|
||||
func (h *tenantHandler) ListNamespaceRules(req *restful.Request, resp *restful.Response) {
|
||||
namespace := req.PathParameter("namespace")
|
||||
username := req.HeaderParameter(constants.UserNameHeader)
|
||||
|
||||
rules, err := h.tenant.GetNamespaceSimpleRules(namespace, username)
|
||||
|
||||
if err != nil {
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp.WriteAsJson(rules)
|
||||
}
|
||||
|
||||
func (h *tenantHandler) ListDevopsRules(req *restful.Request, resp *restful.Response) {
|
||||
|
||||
devops := req.PathParameter("devops")
|
||||
username := req.HeaderParameter(constants.UserNameHeader)
|
||||
|
||||
rules, err := h.tenant.GetUserDevopsSimpleRules(username, devops)
|
||||
|
||||
if err != nil {
|
||||
api.HandleInternalError(resp, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
resp.WriteAsJson(rules)
|
||||
}
|
||||
|
||||
//TODO(wansir): We need move this part to logging module
|
||||
//func (h *tenantHandler) LogQuery(req *restful.Request, resp *restful.Response) {
|
||||
// operation := req.QueryParameter("operation")
|
||||
// req, err := h.regenerateLoggingRequest(req)
|
||||
// switch {
|
||||
// case err != nil:
|
||||
// api.HandleInternalError(resp, err)
|
||||
// case req != nil:
|
||||
// loggingv1alpha2.Get(req, loggingv1alpha2.LevelCluster, h.k8s, h.lo, resp)
|
||||
// default:
|
||||
// if operation == "export" {
|
||||
// resp.Header().Set(restful.HEADER_ContentType, "text/plain")
|
||||
// resp.Header().Set("Content-Disposition", "attachment")
|
||||
// resp.Write(nil)
|
||||
// } else {
|
||||
// resp.WriteAsJson(v1alpha2.APIResponse{Logs: new(loggingclient.Logs)})
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
|
||||
// override namespace query conditions
|
||||
//TODO(wansir): We need move this part to logging module
|
||||
func (h *tenantHandler) regenerateLoggingRequest(req *restful.Request) (*restful.Request, error) {
|
||||
|
||||
username := req.HeaderParameter(constants.UserNameHeader)
|
||||
|
||||
// regenerate the request for log query
|
||||
newUrl := net.FormatURL("http", "127.0.0.1", 80, "/kapis/logging.kubesphere.io/v1alpha2/cluster")
|
||||
values := req.Request.URL.Query()
|
||||
|
||||
clusterRoleRules, err := h.am.GetClusterPolicyRules(username)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hasClusterLogAccess := iam.RulesMatchesRequired(clusterRoleRules, rbacv1.PolicyRule{Verbs: []string{"get"}, Resources: []string{"*"}, APIGroups: []string{"logging.kubesphere.io"}})
|
||||
// if the user is not a cluster admin
|
||||
if !hasClusterLogAccess {
|
||||
queryNamespaces := strings.Split(req.QueryParameter("namespaces"), ",")
|
||||
// then the user can only view logs of namespaces he belongs to
|
||||
namespaces := make([]string, 0)
|
||||
roles, err := h.am.GetRoles("", username)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
for _, role := range roles {
|
||||
if !sliceutil.HasString(namespaces, role.Namespace) && iam.RulesMatchesRequired(role.Rules, rbacv1.PolicyRule{Verbs: []string{"get"}, Resources: []string{"*"}, APIGroups: []string{"logging.kubesphere.io"}}) {
|
||||
namespaces = append(namespaces, role.Namespace)
|
||||
}
|
||||
}
|
||||
|
||||
// if the user belongs to no namespace
|
||||
// then no log visible
|
||||
if len(namespaces) == 0 {
|
||||
return nil, nil
|
||||
} else if len(queryNamespaces) == 1 && queryNamespaces[0] == "" {
|
||||
values.Set("namespaces", strings.Join(namespaces, ","))
|
||||
} else {
|
||||
inter := intersection(queryNamespaces, namespaces)
|
||||
if len(inter) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
values.Set("namespaces", strings.Join(inter, ","))
|
||||
}
|
||||
}
|
||||
|
||||
newUrl.RawQuery = values.Encode()
|
||||
|
||||
// forward the request to logging model
|
||||
newHttpRequest, _ := http.NewRequest(http.MethodGet, newUrl.String(), nil)
|
||||
return restful.NewRequest(newHttpRequest), nil
|
||||
}
|
||||
|
||||
func intersection(s1, s2 []string) (inter []string) {
|
||||
hash := make(map[string]bool)
|
||||
for _, e := range s1 {
|
||||
hash[e] = true
|
||||
}
|
||||
for _, e := range s2 {
|
||||
// If elements present in the hashmap then append intersection list.
|
||||
if hash[e] {
|
||||
inter = append(inter, e)
|
||||
}
|
||||
}
|
||||
//Remove dups from slice.
|
||||
inter = removeDups(inter)
|
||||
return
|
||||
}
|
||||
|
||||
//Remove dups from slice.
|
||||
func removeDups(elements []string) (nodups []string) {
|
||||
encountered := make(map[string]bool)
|
||||
for _, element := range elements {
|
||||
if !encountered[element] {
|
||||
nodups = append(nodups, element)
|
||||
encountered[element] = true
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -29,7 +29,6 @@ import (
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/models"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam/policy"
|
||||
"kubesphere.io/kubesphere/pkg/server/errors"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
|
||||
@@ -59,24 +58,6 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, factory informer
|
||||
Param(ws.PathParameter("workspace", "workspace name")).
|
||||
Returns(http.StatusOK, api.StatusOK, v1alpha1.Workspace{}).
|
||||
Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag}))
|
||||
ws.Route(ws.GET("/workspaces/{workspace}/rules").
|
||||
To(handler.ListWorkspaceRules).
|
||||
Param(ws.PathParameter("workspace", "workspace name")).
|
||||
Doc("List the rules of the specified workspace for the current user").
|
||||
Returns(http.StatusOK, api.StatusOK, policy.SimpleRule{}).
|
||||
Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag}))
|
||||
ws.Route(ws.GET("/namespaces/{namespace}/rules").
|
||||
To(handler.ListNamespaceRules).
|
||||
Param(ws.PathParameter("namespace", "the name of the namespace")).
|
||||
Doc("List the rules of the specified namespace for the current user").
|
||||
Returns(http.StatusOK, api.StatusOK, policy.SimpleRule{}).
|
||||
Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag}))
|
||||
ws.Route(ws.GET("/devops/{devops}/rules").
|
||||
To(handler.ListDevopsRules).
|
||||
Param(ws.PathParameter("devops", "devops project ID")).
|
||||
Doc("List the rules of the specified DevOps project for the current user").
|
||||
Returns(http.StatusOK, api.StatusOK, policy.SimpleRule{}).
|
||||
Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag}))
|
||||
ws.Route(ws.GET("/workspaces/{workspace}/namespaces").
|
||||
To(handler.ListNamespaces).
|
||||
Param(ws.PathParameter("workspace", "workspace name")).
|
||||
@@ -151,32 +132,6 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, factory informer
|
||||
Doc("Delete the specified devops project from the workspace").
|
||||
Returns(http.StatusOK, api.StatusOK, devopsv1alpha2.DevOpsProject{}).
|
||||
Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag}))
|
||||
//ws.Route(ws.GET("/logs").
|
||||
// To(handler.LogQuery).
|
||||
// Doc("Query cluster-level logs in a multi-tenants environment").
|
||||
// Param(ws.QueryParameter("operation", "Operation type. This can be one of four types: query (for querying logs), statistics (for retrieving statistical data), histogram (for displaying log count by time interval) and export (for exporting logs). Defaults to query.").DefaultValue("query").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("workspaces", "A comma-separated list of workspaces. This field restricts the query to specified workspaces. For example, the following filter matches the workspace my-ws and demo-ws: `my-ws,demo-ws`").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("workspace_query", "A comma-separated list of keywords. Differing from **workspaces**, this field performs fuzzy matching on workspaces. For example, the following value limits the query to workspaces whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("namespaces", "A comma-separated list of namespaces. This field restricts the query to specified namespaces. For example, the following filter matches the namespace my-ns and demo-ns: `my-ns,demo-ns`").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("namespace_query", "A comma-separated list of keywords. Differing from **namespaces**, this field performs fuzzy matching on namespaces. For example, the following value limits the query to namespaces whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("workloads", "A comma-separated list of workloads. This field restricts the query to specified workloads. For example, the following filter matches the workload my-wl and demo-wl: `my-wl,demo-wl`").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("workload_query", "A comma-separated list of keywords. Differing from **workloads**, this field performs fuzzy matching on workloads. For example, the following value limits the query to workloads whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("pods", "A comma-separated list of pods. This field restricts the query to specified pods. For example, the following filter matches the pod my-po and demo-po: `my-po,demo-po`").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("pod_query", "A comma-separated list of keywords. Differing from **pods**, this field performs fuzzy matching on pods. For example, the following value limits the query to pods whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("containers", "A comma-separated list of containers. This field restricts the query to specified containers. For example, the following filter matches the container my-cont and demo-cont: `my-cont,demo-cont`").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("container_query", "A comma-separated list of keywords. Differing from **containers**, this field performs fuzzy matching on containers. For example, the following value limits the query to containers whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("log_query", "A comma-separated list of keywords. The query returns logs which contain at least one keyword. Case-insensitive matching. For example, if the field is set to `err,INFO`, the query returns any log containing err(ERR,Err,...) *OR* INFO(info,InFo,...).").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("interval", "Time interval. It requires **operation** is set to histogram. The format is [0-9]+[smhdwMqy]. Defaults to 15m (i.e. 15 min).").DefaultValue("15m").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("start_time", "Start time of query. Default to 0. The format is a string representing milliseconds since the epoch, eg. 1559664000000.").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("end_time", "End time of query. Default to now. The format is a string representing milliseconds since the epoch, eg. 1559664000000.").DataType("string").Required(false)).
|
||||
// Param(ws.QueryParameter("sort", "Sort order. One of acs, desc. This field sorts logs by timestamp.").DataType("string").DefaultValue("desc").Required(false)).
|
||||
// Param(ws.QueryParameter("from", "The offset from the result set. This field returns query results from the specified offset. It requires **operation** is set to query. Defaults to 0 (i.e. from the beginning of the result set).").DataType("integer").DefaultValue("0").Required(false)).
|
||||
// Param(ws.QueryParameter("size", "Size of result to return. It requires **operation** is set to query. Defaults to 10 (i.e. 10 log records).").DataType("integer").DefaultValue("10").Required(false)).
|
||||
// Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag}).
|
||||
// Writes(v1alpha2.Response{}).
|
||||
// Returns(http.StatusOK, api.StatusOK, v1alpha2.Response{})).
|
||||
// Consumes(restful.MIME_JSON, restful.MIME_XML).
|
||||
// Produces(restful.MIME_JSON, "text/plain")
|
||||
|
||||
c.Add(ws)
|
||||
return nil
|
||||
|
||||
@@ -1,606 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
package iam
|
||||
|
||||
import (
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/models"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam/policy"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2/clusterrole"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2/resource"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2/role"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
|
||||
)
|
||||
|
||||
const (
|
||||
ClusterRoleKind = "ClusterRole"
|
||||
NamespaceAdminRoleBindName = "admin"
|
||||
NamespaceViewerRoleBindName = "viewer"
|
||||
)
|
||||
|
||||
type AccessManagementInterface interface {
|
||||
GetClusterRole(username string) (*rbacv1.ClusterRole, error)
|
||||
UnBindAllRoles(username string) error
|
||||
ListRoleBindings(namespace string, role string) ([]*rbacv1.RoleBinding, error)
|
||||
CreateClusterRoleBinding(username string, clusterRole string) error
|
||||
ListRoles(namespace string, conditions *params.Conditions, orderBy string, reverse bool, limit int, offset int) (*models.PageableResponse, error)
|
||||
ListClusterRoles(conditions *params.Conditions, orderBy string, reverse bool, limit int, offset int) (*models.PageableResponse, error)
|
||||
ListClusterRoleBindings(clusterRole string) ([]*rbacv1.ClusterRoleBinding, error)
|
||||
GetClusterRoleSimpleRules(clusterRole string) ([]policy.SimpleRule, error)
|
||||
GetRoleSimpleRules(namespace string, role string) ([]policy.SimpleRule, error)
|
||||
GetRoles(namespace, username string) ([]*rbacv1.Role, error)
|
||||
GetClusterPolicyRules(username string) ([]rbacv1.PolicyRule, error)
|
||||
GetPolicyRules(namespace, username string) ([]rbacv1.PolicyRule, error)
|
||||
GetWorkspaceRoleSimpleRules(workspace, roleName string) []policy.SimpleRule
|
||||
GetWorkspaceRole(workspace, username string) (*rbacv1.ClusterRole, error)
|
||||
GetWorkspaceRoleMap(username string) (map[string]string, error)
|
||||
}
|
||||
|
||||
type amOperator struct {
|
||||
informers informers.SharedInformerFactory
|
||||
resources resource.ResourceGetter
|
||||
kubeClient kubernetes.Interface
|
||||
}
|
||||
|
||||
func (am *amOperator) ListClusterRoleBindings(clusterRole string) ([]*rbacv1.ClusterRoleBinding, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) GetRoles(namespace, username string) ([]*rbacv1.Role, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) GetClusterPolicyRules(username string) ([]rbacv1.PolicyRule, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) GetPolicyRules(namespace, username string) ([]rbacv1.PolicyRule, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) GetWorkspaceRole(workspace, username string) (*rbacv1.ClusterRole, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) UnBindAllRoles(username string) error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func NewAMOperator(kubeClient kubernetes.Interface, informers informers.SharedInformerFactory) *amOperator {
|
||||
resourceGetter := resource.ResourceGetter{}
|
||||
resourceGetter.Add(v1alpha2.Role, role.NewRoleSearcher(informers))
|
||||
resourceGetter.Add(v1alpha2.ClusterRoles, clusterrole.NewClusterRoleSearcher(informers))
|
||||
return &amOperator{
|
||||
informers: informers,
|
||||
resources: resourceGetter,
|
||||
kubeClient: kubeClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (am *amOperator) GetDevopsRoleSimpleRules(role string) []policy.SimpleRule {
|
||||
var rules []policy.SimpleRule
|
||||
|
||||
switch role {
|
||||
case "developer":
|
||||
rules = []policy.SimpleRule{
|
||||
{Name: "pipelines", Actions: []string{"view", "trigger"}},
|
||||
{Name: "roles", Actions: []string{"view"}},
|
||||
{Name: "members", Actions: []string{"view"}},
|
||||
{Name: "devops", Actions: []string{"view"}},
|
||||
}
|
||||
break
|
||||
case "owner":
|
||||
rules = []policy.SimpleRule{
|
||||
{Name: "pipelines", Actions: []string{"create", "edit", "view", "delete", "trigger"}},
|
||||
{Name: "roles", Actions: []string{"view"}},
|
||||
{Name: "members", Actions: []string{"create", "edit", "view", "delete"}},
|
||||
{Name: "credentials", Actions: []string{"create", "edit", "view", "delete"}},
|
||||
{Name: "devops", Actions: []string{"edit", "view", "delete"}},
|
||||
}
|
||||
break
|
||||
case "maintainer":
|
||||
rules = []policy.SimpleRule{
|
||||
{Name: "pipelines", Actions: []string{"create", "edit", "view", "delete", "trigger"}},
|
||||
{Name: "roles", Actions: []string{"view"}},
|
||||
{Name: "members", Actions: []string{"view"}},
|
||||
{Name: "credentials", Actions: []string{"create", "edit", "view", "delete"}},
|
||||
{Name: "devops", Actions: []string{"view"}},
|
||||
}
|
||||
break
|
||||
case "reporter":
|
||||
fallthrough
|
||||
default:
|
||||
rules = []policy.SimpleRule{
|
||||
{Name: "pipelines", Actions: []string{"view"}},
|
||||
{Name: "roles", Actions: []string{"view"}},
|
||||
{Name: "members", Actions: []string{"view"}},
|
||||
{Name: "devops", Actions: []string{"view"}},
|
||||
}
|
||||
break
|
||||
}
|
||||
return rules
|
||||
}
|
||||
|
||||
// Get user roles in namespace
|
||||
func (am *amOperator) GetUserRoles(namespace, username string) ([]*rbacv1.Role, error) {
|
||||
clusterRoleLister := am.informers.Rbac().V1().ClusterRoles().Lister()
|
||||
roleBindingLister := am.informers.Rbac().V1().RoleBindings().Lister()
|
||||
roleLister := am.informers.Rbac().V1().Roles().Lister()
|
||||
roleBindings, err := roleBindingLister.RoleBindings(namespace).List(labels.Everything())
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
roles := make([]*rbacv1.Role, 0)
|
||||
|
||||
for _, roleBinding := range roleBindings {
|
||||
if ContainsUser(roleBinding.Subjects, username) {
|
||||
if roleBinding.RoleRef.Kind == ClusterRoleKind {
|
||||
clusterRole, err := clusterRoleLister.Get(roleBinding.RoleRef.Name)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
klog.Warningf("cluster role %s not found but bind user %s in namespace %s", roleBinding.RoleRef.Name, username, namespace)
|
||||
continue
|
||||
} else {
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
role := rbacv1.Role{}
|
||||
role.TypeMeta = clusterRole.TypeMeta
|
||||
role.ObjectMeta = clusterRole.ObjectMeta
|
||||
role.Rules = clusterRole.Rules
|
||||
role.Namespace = roleBinding.Namespace
|
||||
roles = append(roles, &role)
|
||||
} else {
|
||||
role, err := roleLister.Roles(roleBinding.Namespace).Get(roleBinding.RoleRef.Name)
|
||||
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
klog.Warningf("namespace %s role %s not found, but bind user %s", namespace, roleBinding.RoleRef.Name, username)
|
||||
continue
|
||||
} else {
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
roles = append(roles, role)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return roles, nil
|
||||
}
|
||||
|
||||
func (am *amOperator) GetUserClusterRoles(username string) (*rbacv1.ClusterRole, []*rbacv1.ClusterRole, error) {
|
||||
clusterRoleLister := am.informers.Rbac().V1().ClusterRoles().Lister()
|
||||
clusterRoleBindingLister := am.informers.Rbac().V1().ClusterRoleBindings().Lister()
|
||||
clusterRoleBindings, err := clusterRoleBindingLister.List(labels.Everything())
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
clusterRoles := make([]*rbacv1.ClusterRole, 0)
|
||||
userFacingClusterRole := &rbacv1.ClusterRole{}
|
||||
for _, clusterRoleBinding := range clusterRoleBindings {
|
||||
if ContainsUser(clusterRoleBinding.Subjects, username) {
|
||||
clusterRole, err := clusterRoleLister.Get(clusterRoleBinding.RoleRef.Name)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
klog.Warningf("cluster role %s not found but bind user %s", clusterRoleBinding.RoleRef.Name, username)
|
||||
continue
|
||||
} else {
|
||||
klog.Errorln(err)
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
if clusterRoleBinding.Name == username {
|
||||
userFacingClusterRole = clusterRole
|
||||
}
|
||||
clusterRoles = append(clusterRoles, clusterRole)
|
||||
}
|
||||
}
|
||||
|
||||
return userFacingClusterRole, clusterRoles, nil
|
||||
}
|
||||
|
||||
func (am *amOperator) GetClusterRole(username string) (*rbacv1.ClusterRole, error) {
|
||||
userFacingClusterRole, _, err := am.GetUserClusterRoles(username)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return userFacingClusterRole, nil
|
||||
}
|
||||
|
||||
func (am *amOperator) GetUserClusterRules(username string) ([]rbacv1.PolicyRule, error) {
|
||||
_, clusterRoles, err := am.GetUserClusterRoles(username)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rules := make([]rbacv1.PolicyRule, 0)
|
||||
for _, clusterRole := range clusterRoles {
|
||||
rules = append(rules, clusterRole.Rules...)
|
||||
}
|
||||
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
func (am *amOperator) GetUserRules(namespace, username string) ([]rbacv1.PolicyRule, error) {
|
||||
roles, err := am.GetUserRoles(namespace, username)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rules := make([]rbacv1.PolicyRule, 0)
|
||||
for _, role := range roles {
|
||||
rules = append(rules, role.Rules...)
|
||||
}
|
||||
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
func (am *amOperator) GetWorkspaceRoleBindings(workspace string) ([]*rbacv1.ClusterRoleBinding, error) {
|
||||
|
||||
clusterRoleBindings, err := am.informers.Rbac().V1().ClusterRoleBindings().Lister().List(labels.Everything())
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln("get cluster role bindings", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := make([]*rbacv1.ClusterRoleBinding, 0)
|
||||
|
||||
for _, roleBinding := range clusterRoleBindings {
|
||||
if k8sutil.IsControlledBy(roleBinding.OwnerReferences, "Workspace", workspace) {
|
||||
result = append(result, roleBinding)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
//func (am *amOperator) GetWorkspaceRole(workspace, role string) (*rbacv1.ClusterRole, error) {
|
||||
// if !sliceutil.HasString(constants.WorkSpaceRoles, role) {
|
||||
// return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "workspace role"}, role)
|
||||
// }
|
||||
// role = fmt.Sprintf("workspace:%s:%s", workspace, strings.TrimPrefix(role, "workspace-"))
|
||||
// return am.informers.Rbac().V1().ClusterRoles().Lister().Get(role)
|
||||
//}
|
||||
|
||||
func (am *amOperator) GetWorkspaceRoleMap(username string) (map[string]string, error) {
|
||||
|
||||
clusterRoleBindings, err := am.informers.Rbac().V1().ClusterRoleBindings().Lister().List(labels.Everything())
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln("get cluster role bindings", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := make(map[string]string, 0)
|
||||
|
||||
for _, roleBinding := range clusterRoleBindings {
|
||||
if workspace := k8sutil.GetControlledWorkspace(roleBinding.OwnerReferences); workspace != "" &&
|
||||
ContainsUser(roleBinding.Subjects, username) {
|
||||
result[workspace] = roleBinding.RoleRef.Name
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (am *amOperator) GetUserWorkspaceRole(workspace, username string) (*rbacv1.ClusterRole, error) {
|
||||
workspaceRoleMap, err := am.GetWorkspaceRoleMap(username)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if workspaceRole := workspaceRoleMap[workspace]; workspaceRole != "" {
|
||||
return am.informers.Rbac().V1().ClusterRoles().Lister().Get(workspaceRole)
|
||||
}
|
||||
|
||||
return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "workspace user"}, username)
|
||||
}
|
||||
|
||||
func (am *amOperator) GetRoleBindings(namespace string, roleName string) ([]*rbacv1.RoleBinding, error) {
|
||||
roleBindingLister := am.informers.Rbac().V1().RoleBindings().Lister()
|
||||
roleBindings, err := roleBindingLister.RoleBindings(namespace).List(labels.Everything())
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
items := make([]*rbacv1.RoleBinding, 0)
|
||||
|
||||
for _, roleBinding := range roleBindings {
|
||||
if roleName == "" {
|
||||
items = append(items, roleBinding)
|
||||
} else if roleBinding.RoleRef.Name == roleName {
|
||||
items = append(items, roleBinding)
|
||||
}
|
||||
}
|
||||
|
||||
return items, nil
|
||||
}
|
||||
|
||||
func (am *amOperator) GetClusterRoleBindings(clusterRoleName string) ([]*rbacv1.ClusterRoleBinding, error) {
|
||||
clusterRoleBindingLister := am.informers.Rbac().V1().ClusterRoleBindings().Lister()
|
||||
roleBindings, err := clusterRoleBindingLister.List(labels.Everything())
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
items := make([]*rbacv1.ClusterRoleBinding, 0)
|
||||
|
||||
for _, roleBinding := range roleBindings {
|
||||
if roleBinding.RoleRef.Name == clusterRoleName {
|
||||
items = append(items, roleBinding)
|
||||
}
|
||||
}
|
||||
|
||||
return items, nil
|
||||
}
|
||||
|
||||
func (am *amOperator) ListRoles(namespace string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) {
|
||||
return am.resources.ListResources(namespace, v1alpha2.Roles, conditions, orderBy, reverse, limit, offset)
|
||||
}
|
||||
|
||||
func (am *amOperator) ListRoleBindings(namespace string, role string) ([]*rbacv1.RoleBinding, error) {
|
||||
rbs, err := am.informers.Rbac().V1().RoleBindings().Lister().RoleBindings(namespace).List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := make([]*rbacv1.RoleBinding, 0)
|
||||
for _, rb := range rbs {
|
||||
if rb.RoleRef.Name == role {
|
||||
result = append(result, rb.DeepCopy())
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (am *amOperator) ListWorkspaceRoles(workspace string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) {
|
||||
conditions.Match[v1alpha2.OwnerName] = workspace
|
||||
conditions.Match[v1alpha2.OwnerKind] = "Workspace"
|
||||
result, err := am.resources.ListResources("", v1alpha2.ClusterRoles, conditions, orderBy, reverse, limit, offset)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i, item := range result.Items {
|
||||
if role, ok := item.(*rbacv1.ClusterRole); ok {
|
||||
role = role.DeepCopy()
|
||||
role.Name = role.Annotations[constants.DisplayNameAnnotationKey]
|
||||
result.Items[i] = role
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (am *amOperator) ListClusterRoles(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) {
|
||||
return am.resources.ListResources("", v1alpha2.ClusterRoles, conditions, orderBy, reverse, limit, offset)
|
||||
}
|
||||
|
||||
func (am *amOperator) GetWorkspaceRoleSimpleRules(workspace, roleName string) []policy.SimpleRule {
|
||||
|
||||
workspaceRules := make([]policy.SimpleRule, 0)
|
||||
|
||||
switch roleName {
|
||||
case constants.WorkspaceAdmin:
|
||||
workspaceRules = []policy.SimpleRule{
|
||||
{Name: "workspaces", Actions: []string{"edit", "delete", "view"}},
|
||||
{Name: "members", Actions: []string{"edit", "delete", "create", "view"}},
|
||||
{Name: "devops", Actions: []string{"edit", "delete", "create", "view"}},
|
||||
{Name: "projects", Actions: []string{"edit", "delete", "create", "view"}},
|
||||
{Name: "roles", Actions: []string{"view"}},
|
||||
{Name: "apps", Actions: []string{"view", "create", "manage"}},
|
||||
{Name: "repos", Actions: []string{"view", "manage"}},
|
||||
}
|
||||
case constants.WorkspaceRegular:
|
||||
workspaceRules = []policy.SimpleRule{
|
||||
{Name: "members", Actions: []string{"view"}},
|
||||
{Name: "devops", Actions: []string{"view", "create"}},
|
||||
{Name: "projects", Actions: []string{"view", "create"}},
|
||||
{Name: "apps", Actions: []string{"view", "create"}},
|
||||
{Name: "repos", Actions: []string{"view"}},
|
||||
}
|
||||
case constants.WorkspaceViewer:
|
||||
workspaceRules = []policy.SimpleRule{
|
||||
{Name: "workspaces", Actions: []string{"view"}},
|
||||
{Name: "members", Actions: []string{"view"}},
|
||||
{Name: "devops", Actions: []string{"view"}},
|
||||
{Name: "projects", Actions: []string{"view"}},
|
||||
{Name: "roles", Actions: []string{"view"}},
|
||||
{Name: "apps", Actions: []string{"view"}},
|
||||
{Name: "repos", Actions: []string{"view"}},
|
||||
}
|
||||
case constants.WorkspacesManager:
|
||||
workspaceRules = []policy.SimpleRule{
|
||||
{Name: "workspaces", Actions: []string{"edit", "delete", "view"}},
|
||||
{Name: "members", Actions: []string{"edit", "delete", "create", "view"}},
|
||||
{Name: "roles", Actions: []string{"view"}},
|
||||
}
|
||||
}
|
||||
|
||||
return workspaceRules
|
||||
}
|
||||
|
||||
// Convert cluster role to rules
|
||||
func (am *amOperator) GetClusterRoleSimpleRules(clusterRoleName string) ([]policy.SimpleRule, error) {
|
||||
|
||||
clusterRoleLister := am.informers.Rbac().V1().ClusterRoles().Lister()
|
||||
clusterRole, err := clusterRoleLister.Get(clusterRoleName)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return getClusterSimpleRule(clusterRole.Rules), nil
|
||||
}
|
||||
|
||||
func (am *amOperator) GetUserClusterSimpleRules(username string) ([]policy.SimpleRule, error) {
|
||||
clusterRules, err := am.GetUserClusterRules(username)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return getClusterSimpleRule(clusterRules), nil
|
||||
}
|
||||
|
||||
// Convert roles to rules
|
||||
func (am *amOperator) GetRoleSimpleRules(namespace string, roleName string) ([]policy.SimpleRule, error) {
|
||||
|
||||
roleLister := am.informers.Rbac().V1().Roles().Lister()
|
||||
role, err := roleLister.Roles(namespace).Get(roleName)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ConvertToSimpleRule(role.Rules), nil
|
||||
}
|
||||
|
||||
func getClusterSimpleRule(policyRules []rbacv1.PolicyRule) []policy.SimpleRule {
|
||||
rules := make([]policy.SimpleRule, 0)
|
||||
|
||||
for i := 0; i < len(policy.ClusterRoleRuleMapping); i++ {
|
||||
validActions := make([]string, 0)
|
||||
for j := 0; j < (len(policy.ClusterRoleRuleMapping[i].Actions)); j++ {
|
||||
if rulesMatchesAction(policyRules, policy.ClusterRoleRuleMapping[i].Actions[j]) {
|
||||
validActions = append(validActions, policy.ClusterRoleRuleMapping[i].Actions[j].Name)
|
||||
}
|
||||
}
|
||||
if len(validActions) > 0 {
|
||||
rules = append(rules, policy.SimpleRule{Name: policy.ClusterRoleRuleMapping[i].Name, Actions: validActions})
|
||||
}
|
||||
}
|
||||
|
||||
return rules
|
||||
}
|
||||
|
||||
func ConvertToSimpleRule(policyRules []rbacv1.PolicyRule) []policy.SimpleRule {
|
||||
simpleRules := make([]policy.SimpleRule, 0)
|
||||
for i := 0; i < len(policy.RoleRuleMapping); i++ {
|
||||
rule := policy.SimpleRule{Name: policy.RoleRuleMapping[i].Name}
|
||||
rule.Actions = make([]string, 0)
|
||||
for j := 0; j < len(policy.RoleRuleMapping[i].Actions); j++ {
|
||||
if rulesMatchesAction(policyRules, policy.RoleRuleMapping[i].Actions[j]) {
|
||||
rule.Actions = append(rule.Actions, policy.RoleRuleMapping[i].Actions[j].Name)
|
||||
}
|
||||
}
|
||||
if len(rule.Actions) > 0 {
|
||||
simpleRules = append(simpleRules, rule)
|
||||
}
|
||||
}
|
||||
return simpleRules
|
||||
}
|
||||
|
||||
func (am *amOperator) CreateClusterRoleBinding(username string, clusterRoleName string) error {
|
||||
clusterRoleLister := am.informers.Rbac().V1().ClusterRoles().Lister()
|
||||
|
||||
_, err := clusterRoleLister.Get(clusterRoleName)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO move to user controller
|
||||
if clusterRoleName == constants.ClusterAdmin {
|
||||
// create kubectl pod if cluster role is cluster-admin
|
||||
//if err := kubectl.CreateKubectlDeploy(username); err != nil {
|
||||
// klog.Error("create user terminal pod failed", username, err)
|
||||
//}
|
||||
|
||||
} else {
|
||||
// delete kubectl pod if cluster role is not cluster-admin, whether it exists or not
|
||||
//if err := kubectl.DelKubectlDeploy(username); err != nil {
|
||||
// klog.Error("delete user terminal pod failed", username, err)
|
||||
//}
|
||||
}
|
||||
|
||||
clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
|
||||
clusterRoleBinding.Name = username
|
||||
clusterRoleBinding.RoleRef = rbacv1.RoleRef{Name: clusterRoleName, Kind: ClusterRoleKind}
|
||||
clusterRoleBinding.Subjects = []rbacv1.Subject{{Kind: rbacv1.UserKind, Name: username}}
|
||||
|
||||
clusterRoleBindingLister := am.informers.Rbac().V1().ClusterRoleBindings().Lister()
|
||||
found, err := clusterRoleBindingLister.Get(username)
|
||||
|
||||
if apierrors.IsNotFound(err) {
|
||||
_, err = am.kubeClient.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding)
|
||||
if err != nil {
|
||||
klog.Errorln("create cluster role binding", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// cluster role changed
|
||||
if found.RoleRef.Name != clusterRoleName {
|
||||
deletePolicy := metav1.DeletePropagationBackground
|
||||
gracePeriodSeconds := int64(0)
|
||||
deleteOption := &metav1.DeleteOptions{PropagationPolicy: &deletePolicy, GracePeriodSeconds: &gracePeriodSeconds}
|
||||
err = am.kubeClient.RbacV1().ClusterRoleBindings().Delete(found.Name, deleteOption)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
_, err = am.kubeClient.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if !ContainsUser(found.Subjects, username) {
|
||||
found.Subjects = clusterRoleBinding.Subjects
|
||||
_, err = am.kubeClient.RbacV1().ClusterRoleBindings().Update(found)
|
||||
if err != nil {
|
||||
klog.Errorln("update cluster role binding", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
100
pkg/models/iam/am/am.go
Normal file
100
pkg/models/iam/am/am.go
Normal file
@@ -0,0 +1,100 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 The KubeSphere Authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
* /
|
||||
*/
|
||||
package am
|
||||
|
||||
import (
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2/clusterrole"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2/resource"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2/role"
|
||||
)
|
||||
|
||||
const (
|
||||
ClusterRoleKind = "ClusterRole"
|
||||
NamespaceAdminRoleBindName = "admin"
|
||||
NamespaceViewerRoleBindName = "viewer"
|
||||
)
|
||||
|
||||
type AccessManagementInterface interface {
|
||||
GetPlatformRole(username string) (Role, error)
|
||||
GetClusterRole(cluster, username string) (Role, error)
|
||||
GetWorkspaceRole(workspace, username string) (Role, error)
|
||||
GetNamespaceRole(namespace, username string) (Role, error)
|
||||
GetDevOpsRole(project, username string) (Role, error)
|
||||
}
|
||||
|
||||
type Role interface {
|
||||
GetName() string
|
||||
GetRego() string
|
||||
}
|
||||
|
||||
type amOperator struct {
|
||||
informers informers.SharedInformerFactory
|
||||
resources resource.ResourceGetter
|
||||
kubeClient kubernetes.Interface
|
||||
}
|
||||
|
||||
func (am *amOperator) ListClusterRoleBindings(clusterRole string) ([]*rbacv1.ClusterRoleBinding, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) GetRoles(namespace, username string) ([]*rbacv1.Role, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) GetClusterPolicyRules(username string) ([]rbacv1.PolicyRule, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) GetPolicyRules(namespace, username string) ([]rbacv1.PolicyRule, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) GetWorkspaceRole(workspace, username string) (Role, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func NewAMOperator(kubeClient kubernetes.Interface, informers informers.SharedInformerFactory) AccessManagementInterface {
|
||||
resourceGetter := resource.ResourceGetter{}
|
||||
resourceGetter.Add(v1alpha2.Role, role.NewRoleSearcher(informers))
|
||||
resourceGetter.Add(v1alpha2.ClusterRoles, clusterrole.NewClusterRoleSearcher(informers))
|
||||
return &amOperator{
|
||||
informers: informers,
|
||||
resources: resourceGetter,
|
||||
kubeClient: kubeClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (am *amOperator) GetPlatformRole(username string) (Role, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) GetClusterRole(cluster, username string) (Role, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) GetNamespaceRole(namespace, username string) (Role, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (am *amOperator) GetDevOpsRole(namespace, username string) (Role, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
@@ -15,7 +15,7 @@
|
||||
* limitations under the License.
|
||||
* /
|
||||
*/
|
||||
package iam
|
||||
package im
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -16,4 +16,4 @@
|
||||
* /
|
||||
*/
|
||||
|
||||
package iam
|
||||
package im
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,213 +0,0 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 The KubeSphere Authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
* /
|
||||
*/
|
||||
|
||||
package iam
|
||||
|
||||
import (
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"kubesphere.io/kubesphere/pkg/api/iam"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam/policy"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func RulesMatchesRequired(rules []rbacv1.PolicyRule, required rbacv1.PolicyRule) bool {
|
||||
for _, rule := range rules {
|
||||
if ruleMatchesRequired(rule, required) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func rulesMatchesAction(rules []rbacv1.PolicyRule, action policy.Action) bool {
|
||||
|
||||
for _, required := range action.Rules {
|
||||
if !RulesMatchesRequired(rules, required) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func ruleMatchesRequired(rule rbacv1.PolicyRule, required rbacv1.PolicyRule) bool {
|
||||
|
||||
if len(required.NonResourceURLs) == 0 {
|
||||
for _, apiGroup := range required.APIGroups {
|
||||
for _, resource := range required.Resources {
|
||||
resources := strings.Split(resource, "/")
|
||||
resource = resources[0]
|
||||
var subsource string
|
||||
if len(resources) > 1 {
|
||||
subsource = resources[1]
|
||||
}
|
||||
|
||||
if len(required.ResourceNames) == 0 {
|
||||
for _, verb := range required.Verbs {
|
||||
if !ruleMatchesRequest(rule, apiGroup, "", resource, subsource, "", verb) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, resourceName := range required.ResourceNames {
|
||||
for _, verb := range required.Verbs {
|
||||
if !ruleMatchesRequest(rule, apiGroup, "", resource, subsource, resourceName, verb) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, apiGroup := range required.APIGroups {
|
||||
for _, nonResourceURL := range required.NonResourceURLs {
|
||||
for _, verb := range required.Verbs {
|
||||
if !ruleMatchesRequest(rule, apiGroup, nonResourceURL, "", "", "", verb) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func ruleMatchesResources(rule rbacv1.PolicyRule, apiGroup string, resource string, subresource string, resourceName string) bool {
|
||||
|
||||
if resource == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
if !hasString(rule.APIGroups, apiGroup) && !hasString(rule.APIGroups, rbacv1.ResourceAll) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(rule.ResourceNames) > 0 && !hasString(rule.ResourceNames, resourceName) {
|
||||
return false
|
||||
}
|
||||
|
||||
combinedResource := resource
|
||||
|
||||
if subresource != "" {
|
||||
combinedResource = combinedResource + "/" + subresource
|
||||
}
|
||||
|
||||
for _, res := range rule.Resources {
|
||||
|
||||
// match "*"
|
||||
if res == rbacv1.ResourceAll || res == combinedResource {
|
||||
return true
|
||||
}
|
||||
|
||||
// match "*/subresource"
|
||||
if len(subresource) > 0 && strings.HasPrefix(res, "*/") && subresource == strings.TrimLeft(res, "*/") {
|
||||
return true
|
||||
}
|
||||
// match "resource/*"
|
||||
if strings.HasSuffix(res, "/*") && resource == strings.TrimRight(res, "/*") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func ruleMatchesRequest(rule rbacv1.PolicyRule, apiGroup string, nonResourceURL string, resource string, subresource string, resourceName string, verb string) bool {
|
||||
|
||||
if !hasString(rule.Verbs, verb) && !hasString(rule.Verbs, rbacv1.VerbAll) {
|
||||
return false
|
||||
}
|
||||
|
||||
if nonResourceURL == "" {
|
||||
return ruleMatchesResources(rule, apiGroup, resource, subresource, resourceName)
|
||||
} else {
|
||||
return ruleMatchesNonResource(rule, nonResourceURL)
|
||||
}
|
||||
}
|
||||
|
||||
func ruleMatchesNonResource(rule rbacv1.PolicyRule, nonResourceURL string) bool {
|
||||
|
||||
if nonResourceURL == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, spec := range rule.NonResourceURLs {
|
||||
if pathMatches(nonResourceURL, spec) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func pathMatches(path, spec string) bool {
|
||||
// Allow wildcard match
|
||||
if spec == "*" {
|
||||
return true
|
||||
}
|
||||
// Allow exact match
|
||||
if spec == path {
|
||||
return true
|
||||
}
|
||||
// Allow a trailing * subpath match
|
||||
if strings.HasSuffix(spec, "*") && strings.HasPrefix(path, strings.TrimRight(spec, "*")) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func hasString(slice []string, value string) bool {
|
||||
for _, s := range slice {
|
||||
if s == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func ContainsUser(subjects interface{}, username string) bool {
|
||||
switch subjects.(type) {
|
||||
case []*rbacv1.Subject:
|
||||
for _, subject := range subjects.([]*rbacv1.Subject) {
|
||||
if subject.Kind == rbacv1.UserKind && subject.Name == username {
|
||||
return true
|
||||
}
|
||||
}
|
||||
case []rbacv1.Subject:
|
||||
for _, subject := range subjects.([]rbacv1.Subject) {
|
||||
if subject.Kind == rbacv1.UserKind && subject.Name == username {
|
||||
return true
|
||||
}
|
||||
}
|
||||
case []iam.User:
|
||||
for _, u := range subjects.([]iam.User) {
|
||||
if u.Username == username {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
case []*iam.User:
|
||||
for _, u := range subjects.([]*iam.User) {
|
||||
if u.Username == username {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -26,7 +26,6 @@ import (
|
||||
"kubesphere.io/kubesphere/pkg/db"
|
||||
"kubesphere.io/kubesphere/pkg/models"
|
||||
"kubesphere.io/kubesphere/pkg/models/devops"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam/policy"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
dsClient "kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/mysql"
|
||||
@@ -38,7 +37,6 @@ type DevOpsProjectOperator interface {
|
||||
CreateDevOpsProject(username string, workspace string, req *v1alpha2.DevOpsProject) (*v1alpha2.DevOpsProject, error)
|
||||
GetDevOpsProjectsCount(username string) (uint32, error)
|
||||
DeleteDevOpsProject(projectId, username string) error
|
||||
GetUserDevOpsSimpleRules(username, projectId string) ([]policy.SimpleRule, error)
|
||||
}
|
||||
|
||||
type devopsProjectOperator struct {
|
||||
@@ -208,16 +206,6 @@ func (o *devopsProjectOperator) CreateDevOpsProject(username string, workspace s
|
||||
return project, nil
|
||||
}
|
||||
|
||||
func (o *devopsProjectOperator) GetUserDevOpsSimpleRules(username, projectId string) ([]policy.SimpleRule, error) {
|
||||
|
||||
role, err := o.getProjectUserRole(username, projectId)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, restful.NewError(http.StatusForbidden, err.Error())
|
||||
}
|
||||
return GetDevopsRoleSimpleRules(role), nil
|
||||
}
|
||||
|
||||
func (o *devopsProjectOperator) getProjectUserRole(username, projectId string) (string, error) {
|
||||
if username == devops.KS_ADMIN {
|
||||
return dsClient.ProjectOwner, nil
|
||||
@@ -235,47 +223,3 @@ func (o *devopsProjectOperator) getProjectUserRole(username, projectId string) (
|
||||
|
||||
return membership.Role, nil
|
||||
}
|
||||
|
||||
func GetDevopsRoleSimpleRules(role string) []policy.SimpleRule {
|
||||
var rules []policy.SimpleRule
|
||||
|
||||
switch role {
|
||||
case "developer":
|
||||
rules = []policy.SimpleRule{
|
||||
{Name: "pipelines", Actions: []string{"view", "trigger"}},
|
||||
{Name: "roles", Actions: []string{"view"}},
|
||||
{Name: "members", Actions: []string{"view"}},
|
||||
{Name: "devops", Actions: []string{"view"}},
|
||||
}
|
||||
break
|
||||
case "owner":
|
||||
rules = []policy.SimpleRule{
|
||||
{Name: "pipelines", Actions: []string{"create", "edit", "view", "delete", "trigger"}},
|
||||
{Name: "roles", Actions: []string{"view"}},
|
||||
{Name: "members", Actions: []string{"create", "edit", "view", "delete"}},
|
||||
{Name: "credentials", Actions: []string{"create", "edit", "view", "delete"}},
|
||||
{Name: "devops", Actions: []string{"edit", "view", "delete"}},
|
||||
}
|
||||
break
|
||||
case "maintainer":
|
||||
rules = []policy.SimpleRule{
|
||||
{Name: "pipelines", Actions: []string{"create", "edit", "view", "delete", "trigger"}},
|
||||
{Name: "roles", Actions: []string{"view"}},
|
||||
{Name: "members", Actions: []string{"view"}},
|
||||
{Name: "credentials", Actions: []string{"create", "edit", "view", "delete"}},
|
||||
{Name: "devops", Actions: []string{"view"}},
|
||||
}
|
||||
break
|
||||
case "reporter":
|
||||
fallthrough
|
||||
default:
|
||||
rules = []policy.SimpleRule{
|
||||
{Name: "pipelines", Actions: []string{"view"}},
|
||||
{Name: "roles", Actions: []string{"view"}},
|
||||
{Name: "members", Actions: []string{"view"}},
|
||||
{Name: "devops", Actions: []string{"view"}},
|
||||
}
|
||||
break
|
||||
}
|
||||
return rules
|
||||
}
|
||||
|
||||
@@ -19,17 +19,13 @@ package tenant
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
k8sinformers "k8s.io/client-go/informers"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam"
|
||||
am2 "kubesphere.io/kubesphere/pkg/models/iam/am"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -41,7 +37,7 @@ type NamespaceInterface interface {
|
||||
type namespaceSearcher struct {
|
||||
k8s kubernetes.Interface
|
||||
informers k8sinformers.SharedInformerFactory
|
||||
am iam.AccessManagementInterface
|
||||
am am2.AccessManagementInterface
|
||||
}
|
||||
|
||||
func (s *namespaceSearcher) CreateNamespace(workspace string, namespace *v1.Namespace, username string) (*v1.Namespace, error) {
|
||||
@@ -57,7 +53,7 @@ func (s *namespaceSearcher) CreateNamespace(workspace string, namespace *v1.Name
|
||||
return s.k8s.CoreV1().Namespaces().Create(namespace)
|
||||
}
|
||||
|
||||
func newNamespaceOperator(k8s kubernetes.Interface, informers k8sinformers.SharedInformerFactory, am iam.AccessManagementInterface) NamespaceInterface {
|
||||
func newNamespaceOperator(k8s kubernetes.Interface, informers k8sinformers.SharedInformerFactory, am am2.AccessManagementInterface) NamespaceInterface {
|
||||
return &namespaceSearcher{k8s: k8s, informers: informers, am: am}
|
||||
}
|
||||
|
||||
@@ -111,76 +107,9 @@ func (s *namespaceSearcher) compare(a, b *v1.Namespace, orderBy string) bool {
|
||||
}
|
||||
|
||||
func (s *namespaceSearcher) GetNamespaces(username string) ([]*v1.Namespace, error) {
|
||||
|
||||
roles, err := s.am.GetRoles("", username)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
namespaces := make([]*v1.Namespace, 0)
|
||||
namespaceLister := s.informers.Core().V1().Namespaces().Lister()
|
||||
for _, role := range roles {
|
||||
namespace, err := namespaceLister.Get(role.Namespace)
|
||||
if err != nil {
|
||||
klog.Errorf("get namespace failed: %+v", err)
|
||||
return nil, err
|
||||
}
|
||||
if !containsNamespace(namespaces, namespace) {
|
||||
namespaces = append(namespaces, namespace)
|
||||
}
|
||||
}
|
||||
|
||||
return namespaces, nil
|
||||
}
|
||||
|
||||
func containsNamespace(namespaces []*v1.Namespace, namespace *v1.Namespace) bool {
|
||||
for _, item := range namespaces {
|
||||
if item.Name == namespace.Name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (s *namespaceSearcher) Search(username string, conditions *params.Conditions, orderBy string, reverse bool) ([]*v1.Namespace, error) {
|
||||
|
||||
rules, err := s.am.GetClusterPolicyRules(username)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
namespaces := make([]*v1.Namespace, 0)
|
||||
|
||||
if iam.RulesMatchesRequired(rules, rbacv1.PolicyRule{Verbs: []string{"list"}, APIGroups: []string{"tenant.kubesphere.io"}, Resources: []string{"namespaces"}}) {
|
||||
namespaces, err = s.informers.Core().V1().Namespaces().Lister().List(labels.Everything())
|
||||
} else {
|
||||
namespaces, err = s.GetNamespaces(username)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := make([]*v1.Namespace, 0)
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
if s.match(conditions.Match, namespace) && s.fuzzy(conditions.Fuzzy, namespace) {
|
||||
result = append(result, namespace)
|
||||
}
|
||||
}
|
||||
|
||||
// order & reverse
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
if reverse {
|
||||
i, j = j, i
|
||||
}
|
||||
return s.compare(result[i], result[j], orderBy)
|
||||
})
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func CreateNamespace() {
|
||||
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
@@ -19,20 +19,14 @@ package tenant
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
k8sinformers "k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
|
||||
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/models"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam/policy"
|
||||
am2 "kubesphere.io/kubesphere/pkg/models/iam/am"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/mysql"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type Interface interface {
|
||||
@@ -42,17 +36,14 @@ type Interface interface {
|
||||
ListWorkspaces(username string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error)
|
||||
ListNamespaces(username string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error)
|
||||
ListDevopsProjects(username string, conditions *params.Conditions, orderBy string, reverse bool, limit int, offset int) (*models.PageableResponse, error)
|
||||
GetWorkspaceSimpleRules(workspace, username string) ([]policy.SimpleRule, error)
|
||||
GetNamespaceSimpleRules(namespace, username string) ([]policy.SimpleRule, error)
|
||||
CountDevOpsProjects(username string) (uint32, error)
|
||||
DeleteDevOpsProject(username, projectId string) error
|
||||
GetUserDevopsSimpleRules(username string, devops string) (interface{}, error)
|
||||
}
|
||||
|
||||
type tenantOperator struct {
|
||||
workspaces WorkspaceInterface
|
||||
namespaces NamespaceInterface
|
||||
am iam.AccessManagementInterface
|
||||
am am2.AccessManagementInterface
|
||||
devops DevOpsProjectOperator
|
||||
}
|
||||
|
||||
@@ -65,7 +56,7 @@ func (t *tenantOperator) DeleteDevOpsProject(username, projectId string) error {
|
||||
}
|
||||
|
||||
func (t *tenantOperator) GetUserDevopsSimpleRules(username string, projectId string) (interface{}, error) {
|
||||
return t.devops.GetUserDevOpsSimpleRules(username, projectId)
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (t *tenantOperator) ListDevopsProjects(username string, conditions *params.Conditions, orderBy string, reverse bool, limit int, offset int) (*models.PageableResponse, error) {
|
||||
@@ -77,7 +68,7 @@ func (t *tenantOperator) DeleteNamespace(workspace, namespace string) error {
|
||||
}
|
||||
|
||||
func New(client kubernetes.Interface, informers k8sinformers.SharedInformerFactory, ksinformers ksinformers.SharedInformerFactory, db *mysql.Database) Interface {
|
||||
amOperator := iam.NewAMOperator(client, informers)
|
||||
amOperator := am2.NewAMOperator(client, informers)
|
||||
return &tenantOperator{
|
||||
workspaces: newWorkspaceOperator(client, informers, ksinformers, amOperator, db),
|
||||
namespaces: newNamespaceOperator(client, informers, amOperator),
|
||||
@@ -96,105 +87,11 @@ func (t *tenantOperator) DescribeWorkspace(username, workspaceName string) (*v1a
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if username != "" {
|
||||
workspace = t.appendAnnotations(username, workspace)
|
||||
}
|
||||
|
||||
return workspace, nil
|
||||
}
|
||||
|
||||
func (t *tenantOperator) ListWorkspaces(username string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) {
|
||||
|
||||
workspaces, err := t.workspaces.SearchWorkspace(username, conditions, orderBy, reverse)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// limit offset
|
||||
result := make([]interface{}, 0)
|
||||
for i, workspace := range workspaces {
|
||||
if len(result) < limit && i >= offset {
|
||||
workspace := t.appendAnnotations(username, workspace)
|
||||
result = append(result, workspace)
|
||||
}
|
||||
}
|
||||
|
||||
return &models.PageableResponse{Items: result, TotalCount: len(workspaces)}, nil
|
||||
}
|
||||
|
||||
func (t *tenantOperator) GetWorkspaceSimpleRules(workspace, username string) ([]policy.SimpleRule, error) {
|
||||
clusterRules, err := t.am.GetClusterPolicyRules(username)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// cluster-admin
|
||||
if iam.RulesMatchesRequired(clusterRules, rbacv1.PolicyRule{
|
||||
Verbs: []string{"*"},
|
||||
APIGroups: []string{"*"},
|
||||
Resources: []string{"*"},
|
||||
}) {
|
||||
return t.am.GetWorkspaceRoleSimpleRules(workspace, constants.WorkspaceAdmin), nil
|
||||
}
|
||||
|
||||
workspaceRole, err := t.am.GetWorkspaceRole(workspace, username)
|
||||
|
||||
// workspaces-manager
|
||||
if iam.RulesMatchesRequired(clusterRules, rbacv1.PolicyRule{
|
||||
Verbs: []string{"*"},
|
||||
APIGroups: []string{"*"},
|
||||
Resources: []string{"workspaces", "workspaces/*"},
|
||||
}) {
|
||||
return t.am.GetWorkspaceRoleSimpleRules(workspace, constants.WorkspacesManager), nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return []policy.SimpleRule{}, nil
|
||||
}
|
||||
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return t.am.GetWorkspaceRoleSimpleRules(workspace, workspaceRole.Annotations[constants.DisplayNameAnnotationKey]), nil
|
||||
}
|
||||
|
||||
func (t *tenantOperator) GetNamespaceSimpleRules(namespace, username string) ([]policy.SimpleRule, error) {
|
||||
clusterRules, err := t.am.GetClusterPolicyRules(username)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rules, err := t.am.GetPolicyRules(namespace, username)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rules = append(rules, clusterRules...)
|
||||
|
||||
return iam.ConvertToSimpleRule(rules), nil
|
||||
}
|
||||
|
||||
func (t *tenantOperator) appendAnnotations(username string, workspace *v1alpha1.Workspace) *v1alpha1.Workspace {
|
||||
workspace = workspace.DeepCopy()
|
||||
if workspace.Annotations == nil {
|
||||
workspace.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
if ns, err := t.ListNamespaces(username, ¶ms.Conditions{Match: map[string]string{constants.WorkspaceLabelKey: workspace.Name}}, "", false, 1, 0); err == nil {
|
||||
workspace.Annotations["kubesphere.io/namespace-count"] = strconv.Itoa(ns.TotalCount)
|
||||
}
|
||||
|
||||
if devops, err := t.ListDevopsProjects(username, ¶ms.Conditions{Match: map[string]string{"workspace": workspace.Name}}, "", false, 1, 0); err == nil {
|
||||
workspace.Annotations["kubesphere.io/devops-count"] = strconv.Itoa(devops.TotalCount)
|
||||
}
|
||||
|
||||
userCount, err := t.workspaces.CountUsersInWorkspace(workspace.Name)
|
||||
|
||||
if err == nil {
|
||||
workspace.Annotations["kubesphere.io/member-count"] = strconv.Itoa(userCount)
|
||||
}
|
||||
return workspace
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (t *tenantOperator) ListNamespaces(username string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) {
|
||||
|
||||
@@ -18,27 +18,22 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
core "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/db"
|
||||
"kubesphere.io/kubesphere/pkg/models/devops"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam"
|
||||
am2 "kubesphere.io/kubesphere/pkg/models/iam/am"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/mysql"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/rbac/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@@ -69,14 +64,14 @@ type workspaceOperator struct {
|
||||
client kubernetes.Interface
|
||||
informers informers.SharedInformerFactory
|
||||
ksInformers externalversions.SharedInformerFactory
|
||||
am iam.AccessManagementInterface
|
||||
am am2.AccessManagementInterface
|
||||
|
||||
// TODO: use db interface instead of mysql client
|
||||
// we can refactor this after rewrite devops using crd
|
||||
db *mysql.Database
|
||||
}
|
||||
|
||||
func newWorkspaceOperator(client kubernetes.Interface, informers informers.SharedInformerFactory, ksinformers externalversions.SharedInformerFactory, am iam.AccessManagementInterface, db *mysql.Database) WorkspaceInterface {
|
||||
func newWorkspaceOperator(client kubernetes.Interface, informers informers.SharedInformerFactory, ksinformers externalversions.SharedInformerFactory, am am2.AccessManagementInterface, db *mysql.Database) WorkspaceInterface {
|
||||
return &workspaceOperator{
|
||||
client: client,
|
||||
informers: informers,
|
||||
@@ -111,96 +106,12 @@ func (w *workspaceOperator) DeleteNamespace(workspace string, namespace string)
|
||||
}
|
||||
|
||||
func (w *workspaceOperator) RemoveUser(workspace string, username string) error {
|
||||
workspaceRole, err := w.am.GetWorkspaceRole(workspace, username)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = w.deleteWorkspaceRoleBinding(workspace, username, workspaceRole.Annotations[constants.DisplayNameAnnotationKey])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (w *workspaceOperator) AddUser(workspaceName string, user *InWorkspaceUser) error {
|
||||
|
||||
workspaceRole, err := w.am.GetWorkspaceRole(workspaceName, user.Username)
|
||||
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("get workspace role failed: %+v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
workspaceRoleName := fmt.Sprintf("workspace:%s:%s", workspaceName, strings.TrimPrefix(user.WorkspaceRole, "workspace-"))
|
||||
var currentWorkspaceRoleName string
|
||||
if workspaceRole != nil {
|
||||
currentWorkspaceRoleName = workspaceRole.Name
|
||||
}
|
||||
|
||||
if currentWorkspaceRoleName != workspaceRoleName && currentWorkspaceRoleName != "" {
|
||||
err := w.deleteWorkspaceRoleBinding(workspaceName, user.Username, workspaceRole.Annotations[constants.DisplayNameAnnotationKey])
|
||||
if err != nil {
|
||||
klog.Errorf("delete workspace role binding failed: %+v", err)
|
||||
return err
|
||||
}
|
||||
} else if currentWorkspaceRoleName != "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
return w.createWorkspaceRoleBinding(workspaceName, user.Username, user.WorkspaceRole)
|
||||
}
|
||||
|
||||
func (w *workspaceOperator) createWorkspaceRoleBinding(workspace, username string, role string) error {
|
||||
|
||||
if !sliceutil.HasString(constants.WorkSpaceRoles, role) {
|
||||
return apierrors.NewNotFound(schema.GroupResource{Resource: "workspace role"}, role)
|
||||
}
|
||||
|
||||
roleBindingName := fmt.Sprintf("workspace:%s:%s", workspace, strings.TrimPrefix(role, "workspace-"))
|
||||
workspaceRoleBinding, err := w.informers.Rbac().V1().ClusterRoleBindings().Lister().Get(roleBindingName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !iam.ContainsUser(workspaceRoleBinding.Subjects, username) {
|
||||
workspaceRoleBinding = workspaceRoleBinding.DeepCopy()
|
||||
workspaceRoleBinding.Subjects = append(workspaceRoleBinding.Subjects, v1.Subject{APIGroup: "rbac.authorization.k8s.io", Kind: "User", Name: username})
|
||||
_, err = w.client.RbacV1().ClusterRoleBindings().Update(workspaceRoleBinding)
|
||||
if err != nil {
|
||||
klog.Errorf("update workspace role binding failed: %+v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *workspaceOperator) deleteWorkspaceRoleBinding(workspace, username string, role string) error {
|
||||
|
||||
if !sliceutil.HasString(constants.WorkSpaceRoles, role) {
|
||||
return apierrors.NewNotFound(schema.GroupResource{Resource: "workspace role"}, role)
|
||||
}
|
||||
|
||||
roleBindingName := fmt.Sprintf("workspace:%s:%s", workspace, strings.TrimPrefix(role, "workspace-"))
|
||||
|
||||
workspaceRoleBinding, err := w.informers.Rbac().V1().ClusterRoleBindings().Lister().Get(roleBindingName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
workspaceRoleBinding = workspaceRoleBinding.DeepCopy()
|
||||
|
||||
for i, v := range workspaceRoleBinding.Subjects {
|
||||
if v.Kind == v1.UserKind && v.Name == username {
|
||||
workspaceRoleBinding.Subjects = append(workspaceRoleBinding.Subjects[:i], workspaceRoleBinding.Subjects[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
|
||||
workspaceRoleBinding, err = w.client.RbacV1().ClusterRoleBindings().Update(workspaceRoleBinding)
|
||||
|
||||
return err
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (w *workspaceOperator) CountDevopsProjectsInWorkspace(workspaceName string) (int, error) {
|
||||
@@ -288,50 +199,7 @@ func (*workspaceOperator) compare(a, b *v1alpha1.Workspace, orderBy string) bool
|
||||
}
|
||||
|
||||
func (w *workspaceOperator) SearchWorkspace(username string, conditions *params.Conditions, orderBy string, reverse bool) ([]*v1alpha1.Workspace, error) {
|
||||
rules, err := w.am.GetClusterPolicyRules(username)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
workspaces := make([]*v1alpha1.Workspace, 0)
|
||||
|
||||
if iam.RulesMatchesRequired(rules, rbacv1.PolicyRule{Verbs: []string{"list"}, APIGroups: []string{"tenant.kubesphere.io"}, Resources: []string{"workspaces"}}) {
|
||||
workspaces, err = w.ksInformers.Tenant().V1alpha1().Workspaces().Lister().List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
workspaceRoles, err := w.am.GetWorkspaceRoleMap(username)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for k := range workspaceRoles {
|
||||
workspace, err := w.ksInformers.Tenant().V1alpha1().Workspaces().Lister().Get(k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
workspaces = append(workspaces, workspace)
|
||||
}
|
||||
}
|
||||
|
||||
result := make([]*v1alpha1.Workspace, 0)
|
||||
|
||||
for _, workspace := range workspaces {
|
||||
if w.match(conditions.Match, workspace) && w.fuzzy(conditions.Fuzzy, workspace) {
|
||||
result = append(result, workspace)
|
||||
}
|
||||
}
|
||||
|
||||
// order & reverse
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
if reverse {
|
||||
i, j = j, i
|
||||
}
|
||||
return w.compare(result[i], result[j], orderBy)
|
||||
})
|
||||
|
||||
return result, nil
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (w *workspaceOperator) GetWorkspace(workspaceName string) (*v1alpha1.Workspace, error) {
|
||||
|
||||
4
vendor/github.com/OneOfOne/xxhash/.gitignore
generated
vendored
Normal file
4
vendor/github.com/OneOfOne/xxhash/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
*.txt
|
||||
*.pprof
|
||||
cmap2/
|
||||
cache/
|
||||
12
vendor/github.com/OneOfOne/xxhash/.travis.yml
generated
vendored
Normal file
12
vendor/github.com/OneOfOne/xxhash/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
language: go
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.8
|
||||
- 1.9
|
||||
- "1.10"
|
||||
- master
|
||||
|
||||
script:
|
||||
- go test -tags safe ./...
|
||||
- go test ./...
|
||||
187
vendor/github.com/OneOfOne/xxhash/LICENSE
generated
vendored
Normal file
187
vendor/github.com/OneOfOne/xxhash/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,187 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
75
vendor/github.com/OneOfOne/xxhash/README.md
generated
vendored
Normal file
75
vendor/github.com/OneOfOne/xxhash/README.md
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
# xxhash [](https://godoc.org/github.com/OneOfOne/xxhash) [](https://travis-ci.org/OneOfOne/xxhash) [](https://gocover.io/github.com/OneOfOne/xxhash)
|
||||
|
||||
This is a native Go implementation of the excellent [xxhash](https://github.com/Cyan4973/xxHash)* algorithm, an extremely fast non-cryptographic Hash algorithm, working at speeds close to RAM limits.
|
||||
|
||||
* The C implementation is ([Copyright](https://github.com/Cyan4973/xxHash/blob/master/LICENSE) (c) 2012-2014, Yann Collet)
|
||||
|
||||
## Install
|
||||
|
||||
go get github.com/OneOfOne/xxhash
|
||||
|
||||
## Features
|
||||
|
||||
* On Go 1.7+ the pure go version is faster than CGO for all inputs.
|
||||
* Supports ChecksumString{32,64} xxhash{32,64}.WriteString, which uses no copies when it can, falls back to copy on appengine.
|
||||
* The native version falls back to a less optimized version on appengine due to the lack of unsafe.
|
||||
* Almost as fast as the mostly pure assembly version written by the brilliant [cespare](https://github.com/cespare/xxhash), while also supporting seeds.
|
||||
* To manually toggle the appengine version build with `-tags safe`.
|
||||
|
||||
## Benchmark
|
||||
|
||||
### Core i7-4790 @ 3.60GHz, Linux 4.12.6-1-ARCH (64bit), Go tip (+ff90f4af66 2017-08-19)
|
||||
|
||||
```bash
|
||||
➤ go test -bench '64' -count 5 -tags cespare | benchstat /dev/stdin
|
||||
name time/op
|
||||
|
||||
# https://github.com/cespare/xxhash
|
||||
XXSum64Cespare/Func-8 160ns ± 2%
|
||||
XXSum64Cespare/Struct-8 173ns ± 1%
|
||||
XXSum64ShortCespare/Func-8 6.78ns ± 1%
|
||||
XXSum64ShortCespare/Struct-8 19.6ns ± 2%
|
||||
|
||||
# this package (default mode, using unsafe)
|
||||
XXSum64/Func-8 170ns ± 1%
|
||||
XXSum64/Struct-8 182ns ± 1%
|
||||
XXSum64Short/Func-8 13.5ns ± 3%
|
||||
XXSum64Short/Struct-8 20.4ns ± 0%
|
||||
|
||||
# this package (appengine, *not* using unsafe)
|
||||
XXSum64/Func-8 241ns ± 5%
|
||||
XXSum64/Struct-8 243ns ± 6%
|
||||
XXSum64Short/Func-8 15.2ns ± 2%
|
||||
XXSum64Short/Struct-8 23.7ns ± 5%
|
||||
|
||||
CRC64ISO-8 1.23µs ± 1%
|
||||
CRC64ISOString-8 2.71µs ± 4%
|
||||
CRC64ISOShort-8 22.2ns ± 3%
|
||||
|
||||
Fnv64-8 2.34µs ± 1%
|
||||
Fnv64Short-8 74.7ns ± 8%
|
||||
#
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
h := xxhash.New64()
|
||||
// r, err := os.Open("......")
|
||||
// defer f.Close()
|
||||
r := strings.NewReader(F)
|
||||
io.Copy(h, r)
|
||||
fmt.Println("xxhash.Backend:", xxhash.Backend)
|
||||
fmt.Println("File checksum:", h.Sum64())
|
||||
```
|
||||
|
||||
[<kbd>playground</kbd>](http://play.golang.org/p/rhRN3RdQyd)
|
||||
|
||||
## TODO
|
||||
|
||||
* Rewrite the 32bit version to be more optimized.
|
||||
* General cleanup as the Go inliner gets smarter.
|
||||
|
||||
## License
|
||||
|
||||
This project is released under the Apache v2. licence. See [LICENCE](LICENCE) for more details.
|
||||
1
vendor/github.com/OneOfOne/xxhash/go.mod
generated
vendored
Normal file
1
vendor/github.com/OneOfOne/xxhash/go.mod
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
module github.com/OneOfOne/xxhash
|
||||
189
vendor/github.com/OneOfOne/xxhash/xxhash.go
generated
vendored
Normal file
189
vendor/github.com/OneOfOne/xxhash/xxhash.go
generated
vendored
Normal file
@@ -0,0 +1,189 @@
|
||||
package xxhash
|
||||
|
||||
const (
|
||||
prime32x1 uint32 = 2654435761
|
||||
prime32x2 uint32 = 2246822519
|
||||
prime32x3 uint32 = 3266489917
|
||||
prime32x4 uint32 = 668265263
|
||||
prime32x5 uint32 = 374761393
|
||||
|
||||
prime64x1 uint64 = 11400714785074694791
|
||||
prime64x2 uint64 = 14029467366897019727
|
||||
prime64x3 uint64 = 1609587929392839161
|
||||
prime64x4 uint64 = 9650029242287828579
|
||||
prime64x5 uint64 = 2870177450012600261
|
||||
|
||||
maxInt32 int32 = (1<<31 - 1)
|
||||
|
||||
// precomputed zero Vs for seed 0
|
||||
zero64x1 = 0x60ea27eeadc0b5d6
|
||||
zero64x2 = 0xc2b2ae3d27d4eb4f
|
||||
zero64x3 = 0x0
|
||||
zero64x4 = 0x61c8864e7a143579
|
||||
)
|
||||
|
||||
// Checksum32 returns the checksum of the input data with the seed set to 0.
|
||||
func Checksum32(in []byte) uint32 {
|
||||
return Checksum32S(in, 0)
|
||||
}
|
||||
|
||||
// ChecksumString32 returns the checksum of the input data, without creating a copy, with the seed set to 0.
|
||||
func ChecksumString32(s string) uint32 {
|
||||
return ChecksumString32S(s, 0)
|
||||
}
|
||||
|
||||
type XXHash32 struct {
|
||||
mem [16]byte
|
||||
ln, memIdx int32
|
||||
v1, v2, v3, v4 uint32
|
||||
seed uint32
|
||||
}
|
||||
|
||||
// Size returns the number of bytes Sum will return.
|
||||
func (xx *XXHash32) Size() int {
|
||||
return 4
|
||||
}
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
|
||||
// The Write method must be able to accept any amount
|
||||
// of data, but it may operate more efficiently if all writes
|
||||
// are a multiple of the block size.
|
||||
func (xx *XXHash32) BlockSize() int {
|
||||
return 16
|
||||
}
|
||||
|
||||
// NewS32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the specific seed.
|
||||
func NewS32(seed uint32) (xx *XXHash32) {
|
||||
xx = &XXHash32{
|
||||
seed: seed,
|
||||
}
|
||||
xx.Reset()
|
||||
return
|
||||
}
|
||||
|
||||
// New32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the seed set to 0.
|
||||
func New32() *XXHash32 {
|
||||
return NewS32(0)
|
||||
}
|
||||
|
||||
func (xx *XXHash32) Reset() {
|
||||
xx.v1 = xx.seed + prime32x1 + prime32x2
|
||||
xx.v2 = xx.seed + prime32x2
|
||||
xx.v3 = xx.seed
|
||||
xx.v4 = xx.seed - prime32x1
|
||||
xx.ln, xx.memIdx = 0, 0
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
// It does not change the underlying hash state.
|
||||
func (xx *XXHash32) Sum(in []byte) []byte {
|
||||
s := xx.Sum32()
|
||||
return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
|
||||
}
|
||||
|
||||
// Checksum64 an alias for Checksum64S(in, 0)
|
||||
func Checksum64(in []byte) uint64 {
|
||||
return Checksum64S(in, 0)
|
||||
}
|
||||
|
||||
// ChecksumString64 returns the checksum of the input data, without creating a copy, with the seed set to 0.
|
||||
func ChecksumString64(s string) uint64 {
|
||||
return ChecksumString64S(s, 0)
|
||||
}
|
||||
|
||||
type XXHash64 struct {
|
||||
v1, v2, v3, v4 uint64
|
||||
seed uint64
|
||||
ln uint64
|
||||
mem [32]byte
|
||||
memIdx int8
|
||||
}
|
||||
|
||||
// Size returns the number of bytes Sum will return.
|
||||
func (xx *XXHash64) Size() int {
|
||||
return 8
|
||||
}
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
|
||||
// The Write method must be able to accept any amount
|
||||
// of data, but it may operate more efficiently if all writes
|
||||
// are a multiple of the block size.
|
||||
func (xx *XXHash64) BlockSize() int {
|
||||
return 32
|
||||
}
|
||||
|
||||
// NewS64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the specific seed.
|
||||
func NewS64(seed uint64) (xx *XXHash64) {
|
||||
xx = &XXHash64{
|
||||
seed: seed,
|
||||
}
|
||||
xx.Reset()
|
||||
return
|
||||
}
|
||||
|
||||
// New64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the seed set to 0x0.
|
||||
func New64() *XXHash64 {
|
||||
return NewS64(0)
|
||||
}
|
||||
|
||||
func (xx *XXHash64) Reset() {
|
||||
xx.ln, xx.memIdx = 0, 0
|
||||
xx.v1, xx.v2, xx.v3, xx.v4 = resetVs64(xx.seed)
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
// It does not change the underlying hash state.
|
||||
func (xx *XXHash64) Sum(in []byte) []byte {
|
||||
s := xx.Sum64()
|
||||
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
|
||||
}
|
||||
|
||||
// force the compiler to use ROTL instructions
|
||||
|
||||
func rotl32_1(x uint32) uint32 { return (x << 1) | (x >> (32 - 1)) }
|
||||
func rotl32_7(x uint32) uint32 { return (x << 7) | (x >> (32 - 7)) }
|
||||
func rotl32_11(x uint32) uint32 { return (x << 11) | (x >> (32 - 11)) }
|
||||
func rotl32_12(x uint32) uint32 { return (x << 12) | (x >> (32 - 12)) }
|
||||
func rotl32_13(x uint32) uint32 { return (x << 13) | (x >> (32 - 13)) }
|
||||
func rotl32_17(x uint32) uint32 { return (x << 17) | (x >> (32 - 17)) }
|
||||
func rotl32_18(x uint32) uint32 { return (x << 18) | (x >> (32 - 18)) }
|
||||
|
||||
func rotl64_1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) }
|
||||
func rotl64_7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) }
|
||||
func rotl64_11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
|
||||
func rotl64_12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
|
||||
func rotl64_18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
|
||||
func rotl64_23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
|
||||
func rotl64_27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
|
||||
func rotl64_31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
|
||||
|
||||
func mix64(h uint64) uint64 {
|
||||
h ^= h >> 33
|
||||
h *= prime64x2
|
||||
h ^= h >> 29
|
||||
h *= prime64x3
|
||||
h ^= h >> 32
|
||||
return h
|
||||
}
|
||||
|
||||
func resetVs64(seed uint64) (v1, v2, v3, v4 uint64) {
|
||||
if seed == 0 {
|
||||
return zero64x1, zero64x2, zero64x3, zero64x4
|
||||
}
|
||||
return (seed + prime64x1 + prime64x2), (seed + prime64x2), (seed), (seed - prime64x1)
|
||||
}
|
||||
|
||||
// borrowed from cespare
|
||||
func round64(h, v uint64) uint64 {
|
||||
h += v * prime64x2
|
||||
h = rotl64_31(h)
|
||||
h *= prime64x1
|
||||
return h
|
||||
}
|
||||
|
||||
func mergeRound64(h, v uint64) uint64 {
|
||||
v = round64(0, v)
|
||||
h ^= v
|
||||
h = h*prime64x1 + prime64x4
|
||||
return h
|
||||
}
|
||||
161
vendor/github.com/OneOfOne/xxhash/xxhash_go17.go
generated
vendored
Normal file
161
vendor/github.com/OneOfOne/xxhash/xxhash_go17.go
generated
vendored
Normal file
@@ -0,0 +1,161 @@
|
||||
package xxhash
|
||||
|
||||
func u32(in []byte) uint32 {
|
||||
return uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
|
||||
}
|
||||
|
||||
func u64(in []byte) uint64 {
|
||||
return uint64(in[0]) | uint64(in[1])<<8 | uint64(in[2])<<16 | uint64(in[3])<<24 | uint64(in[4])<<32 | uint64(in[5])<<40 | uint64(in[6])<<48 | uint64(in[7])<<56
|
||||
}
|
||||
|
||||
// Checksum32S returns the checksum of the input bytes with the specific seed.
|
||||
func Checksum32S(in []byte, seed uint32) (h uint32) {
|
||||
var i int
|
||||
|
||||
if len(in) > 15 {
|
||||
var (
|
||||
v1 = seed + prime32x1 + prime32x2
|
||||
v2 = seed + prime32x2
|
||||
v3 = seed + 0
|
||||
v4 = seed - prime32x1
|
||||
)
|
||||
for ; i < len(in)-15; i += 16 {
|
||||
in := in[i : i+16 : len(in)]
|
||||
v1 += u32(in[0:4:len(in)]) * prime32x2
|
||||
v1 = rotl32_13(v1) * prime32x1
|
||||
|
||||
v2 += u32(in[4:8:len(in)]) * prime32x2
|
||||
v2 = rotl32_13(v2) * prime32x1
|
||||
|
||||
v3 += u32(in[8:12:len(in)]) * prime32x2
|
||||
v3 = rotl32_13(v3) * prime32x1
|
||||
|
||||
v4 += u32(in[12:16:len(in)]) * prime32x2
|
||||
v4 = rotl32_13(v4) * prime32x1
|
||||
}
|
||||
|
||||
h = rotl32_1(v1) + rotl32_7(v2) + rotl32_12(v3) + rotl32_18(v4)
|
||||
|
||||
} else {
|
||||
h = seed + prime32x5
|
||||
}
|
||||
|
||||
h += uint32(len(in))
|
||||
for ; i <= len(in)-4; i += 4 {
|
||||
in := in[i : i+4 : len(in)]
|
||||
h += u32(in[0:4:len(in)]) * prime32x3
|
||||
h = rotl32_17(h) * prime32x4
|
||||
}
|
||||
|
||||
for ; i < len(in); i++ {
|
||||
h += uint32(in[i]) * prime32x5
|
||||
h = rotl32_11(h) * prime32x1
|
||||
}
|
||||
|
||||
h ^= h >> 15
|
||||
h *= prime32x2
|
||||
h ^= h >> 13
|
||||
h *= prime32x3
|
||||
h ^= h >> 16
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (xx *XXHash32) Write(in []byte) (n int, err error) {
|
||||
i, ml := 0, int(xx.memIdx)
|
||||
n = len(in)
|
||||
xx.ln += int32(n)
|
||||
|
||||
if d := 16 - ml; ml > 0 && ml+len(in) > 16 {
|
||||
xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[:d]))
|
||||
ml, in = 16, in[d:len(in):len(in)]
|
||||
} else if ml+len(in) < 16 {
|
||||
xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in))
|
||||
return
|
||||
}
|
||||
|
||||
if ml > 0 {
|
||||
i += 16 - ml
|
||||
xx.memIdx += int32(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in))
|
||||
in := xx.mem[:16:len(xx.mem)]
|
||||
|
||||
xx.v1 += u32(in[0:4:len(in)]) * prime32x2
|
||||
xx.v1 = rotl32_13(xx.v1) * prime32x1
|
||||
|
||||
xx.v2 += u32(in[4:8:len(in)]) * prime32x2
|
||||
xx.v2 = rotl32_13(xx.v2) * prime32x1
|
||||
|
||||
xx.v3 += u32(in[8:12:len(in)]) * prime32x2
|
||||
xx.v3 = rotl32_13(xx.v3) * prime32x1
|
||||
|
||||
xx.v4 += u32(in[12:16:len(in)]) * prime32x2
|
||||
xx.v4 = rotl32_13(xx.v4) * prime32x1
|
||||
|
||||
xx.memIdx = 0
|
||||
}
|
||||
|
||||
for ; i <= len(in)-16; i += 16 {
|
||||
in := in[i : i+16 : len(in)]
|
||||
xx.v1 += u32(in[0:4:len(in)]) * prime32x2
|
||||
xx.v1 = rotl32_13(xx.v1) * prime32x1
|
||||
|
||||
xx.v2 += u32(in[4:8:len(in)]) * prime32x2
|
||||
xx.v2 = rotl32_13(xx.v2) * prime32x1
|
||||
|
||||
xx.v3 += u32(in[8:12:len(in)]) * prime32x2
|
||||
xx.v3 = rotl32_13(xx.v3) * prime32x1
|
||||
|
||||
xx.v4 += u32(in[12:16:len(in)]) * prime32x2
|
||||
xx.v4 = rotl32_13(xx.v4) * prime32x1
|
||||
}
|
||||
|
||||
if len(in)-i != 0 {
|
||||
xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)]))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (xx *XXHash32) Sum32() (h uint32) {
|
||||
var i int32
|
||||
if xx.ln > 15 {
|
||||
h = rotl32_1(xx.v1) + rotl32_7(xx.v2) + rotl32_12(xx.v3) + rotl32_18(xx.v4)
|
||||
} else {
|
||||
h = xx.seed + prime32x5
|
||||
}
|
||||
|
||||
h += uint32(xx.ln)
|
||||
|
||||
if xx.memIdx > 0 {
|
||||
for ; i < xx.memIdx-3; i += 4 {
|
||||
in := xx.mem[i : i+4 : len(xx.mem)]
|
||||
h += u32(in[0:4:len(in)]) * prime32x3
|
||||
h = rotl32_17(h) * prime32x4
|
||||
}
|
||||
|
||||
for ; i < xx.memIdx; i++ {
|
||||
h += uint32(xx.mem[i]) * prime32x5
|
||||
h = rotl32_11(h) * prime32x1
|
||||
}
|
||||
}
|
||||
h ^= h >> 15
|
||||
h *= prime32x2
|
||||
h ^= h >> 13
|
||||
h *= prime32x3
|
||||
h ^= h >> 16
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Checksum64S returns the 64bit xxhash checksum for a single input
|
||||
func Checksum64S(in []byte, seed uint64) uint64 {
|
||||
if len(in) == 0 && seed == 0 {
|
||||
return 0xef46db3751d8e999
|
||||
}
|
||||
|
||||
if len(in) > 31 {
|
||||
return checksum64(in, seed)
|
||||
}
|
||||
|
||||
return checksum64Short(in, seed)
|
||||
}
|
||||
183
vendor/github.com/OneOfOne/xxhash/xxhash_safe.go
generated
vendored
Normal file
183
vendor/github.com/OneOfOne/xxhash/xxhash_safe.go
generated
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
// +build appengine safe ppc64le ppc64be mipsle mipsbe
|
||||
|
||||
package xxhash
|
||||
|
||||
// Backend returns the current version of xxhash being used.
|
||||
const Backend = "GoSafe"
|
||||
|
||||
func ChecksumString32S(s string, seed uint32) uint32 {
|
||||
return Checksum32S([]byte(s), seed)
|
||||
}
|
||||
|
||||
func (xx *XXHash32) WriteString(s string) (int, error) {
|
||||
if len(s) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
return xx.Write([]byte(s))
|
||||
}
|
||||
|
||||
func ChecksumString64S(s string, seed uint64) uint64 {
|
||||
return Checksum64S([]byte(s), seed)
|
||||
}
|
||||
|
||||
func (xx *XXHash64) WriteString(s string) (int, error) {
|
||||
if len(s) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
return xx.Write([]byte(s))
|
||||
}
|
||||
|
||||
func checksum64(in []byte, seed uint64) (h uint64) {
|
||||
var (
|
||||
v1, v2, v3, v4 = resetVs64(seed)
|
||||
|
||||
i int
|
||||
)
|
||||
|
||||
for ; i < len(in)-31; i += 32 {
|
||||
in := in[i : i+32 : len(in)]
|
||||
v1 = round64(v1, u64(in[0:8:len(in)]))
|
||||
v2 = round64(v2, u64(in[8:16:len(in)]))
|
||||
v3 = round64(v3, u64(in[16:24:len(in)]))
|
||||
v4 = round64(v4, u64(in[24:32:len(in)]))
|
||||
}
|
||||
|
||||
h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
|
||||
|
||||
h = mergeRound64(h, v1)
|
||||
h = mergeRound64(h, v2)
|
||||
h = mergeRound64(h, v3)
|
||||
h = mergeRound64(h, v4)
|
||||
|
||||
h += uint64(len(in))
|
||||
|
||||
for ; i < len(in)-7; i += 8 {
|
||||
h ^= round64(0, u64(in[i:len(in):len(in)]))
|
||||
h = rotl64_27(h)*prime64x1 + prime64x4
|
||||
}
|
||||
|
||||
for ; i < len(in)-3; i += 4 {
|
||||
h ^= uint64(u32(in[i:len(in):len(in)])) * prime64x1
|
||||
h = rotl64_23(h)*prime64x2 + prime64x3
|
||||
}
|
||||
|
||||
for ; i < len(in); i++ {
|
||||
h ^= uint64(in[i]) * prime64x5
|
||||
h = rotl64_11(h) * prime64x1
|
||||
}
|
||||
|
||||
return mix64(h)
|
||||
}
|
||||
|
||||
func checksum64Short(in []byte, seed uint64) uint64 {
|
||||
var (
|
||||
h = seed + prime64x5 + uint64(len(in))
|
||||
i int
|
||||
)
|
||||
|
||||
for ; i < len(in)-7; i += 8 {
|
||||
k := u64(in[i : i+8 : len(in)])
|
||||
h ^= round64(0, k)
|
||||
h = rotl64_27(h)*prime64x1 + prime64x4
|
||||
}
|
||||
|
||||
for ; i < len(in)-3; i += 4 {
|
||||
h ^= uint64(u32(in[i:i+4:len(in)])) * prime64x1
|
||||
h = rotl64_23(h)*prime64x2 + prime64x3
|
||||
}
|
||||
|
||||
for ; i < len(in); i++ {
|
||||
h ^= uint64(in[i]) * prime64x5
|
||||
h = rotl64_11(h) * prime64x1
|
||||
}
|
||||
|
||||
return mix64(h)
|
||||
}
|
||||
|
||||
func (xx *XXHash64) Write(in []byte) (n int, err error) {
|
||||
var (
|
||||
ml = int(xx.memIdx)
|
||||
d = 32 - ml
|
||||
)
|
||||
|
||||
n = len(in)
|
||||
xx.ln += uint64(n)
|
||||
|
||||
if ml+len(in) < 32 {
|
||||
xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in))
|
||||
return
|
||||
}
|
||||
|
||||
i, v1, v2, v3, v4 := 0, xx.v1, xx.v2, xx.v3, xx.v4
|
||||
if ml > 0 && ml+len(in) > 32 {
|
||||
xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in[:d:len(in)]))
|
||||
in = in[d:len(in):len(in)]
|
||||
|
||||
in := xx.mem[0:32:len(xx.mem)]
|
||||
|
||||
v1 = round64(v1, u64(in[0:8:len(in)]))
|
||||
v2 = round64(v2, u64(in[8:16:len(in)]))
|
||||
v3 = round64(v3, u64(in[16:24:len(in)]))
|
||||
v4 = round64(v4, u64(in[24:32:len(in)]))
|
||||
|
||||
xx.memIdx = 0
|
||||
}
|
||||
|
||||
for ; i < len(in)-31; i += 32 {
|
||||
in := in[i : i+32 : len(in)]
|
||||
v1 = round64(v1, u64(in[0:8:len(in)]))
|
||||
v2 = round64(v2, u64(in[8:16:len(in)]))
|
||||
v3 = round64(v3, u64(in[16:24:len(in)]))
|
||||
v4 = round64(v4, u64(in[24:32:len(in)]))
|
||||
}
|
||||
|
||||
if len(in)-i != 0 {
|
||||
xx.memIdx += int8(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)]))
|
||||
}
|
||||
|
||||
xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (xx *XXHash64) Sum64() (h uint64) {
|
||||
var i int
|
||||
if xx.ln > 31 {
|
||||
v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4
|
||||
h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
|
||||
|
||||
h = mergeRound64(h, v1)
|
||||
h = mergeRound64(h, v2)
|
||||
h = mergeRound64(h, v3)
|
||||
h = mergeRound64(h, v4)
|
||||
} else {
|
||||
h = xx.seed + prime64x5
|
||||
}
|
||||
|
||||
h += uint64(xx.ln)
|
||||
if xx.memIdx > 0 {
|
||||
in := xx.mem[:xx.memIdx]
|
||||
for ; i < int(xx.memIdx)-7; i += 8 {
|
||||
in := in[i : i+8 : len(in)]
|
||||
k := u64(in[0:8:len(in)])
|
||||
k *= prime64x2
|
||||
k = rotl64_31(k)
|
||||
k *= prime64x1
|
||||
h ^= k
|
||||
h = rotl64_27(h)*prime64x1 + prime64x4
|
||||
}
|
||||
|
||||
for ; i < int(xx.memIdx)-3; i += 4 {
|
||||
in := in[i : i+4 : len(in)]
|
||||
h ^= uint64(u32(in[0:4:len(in)])) * prime64x1
|
||||
h = rotl64_23(h)*prime64x2 + prime64x3
|
||||
}
|
||||
|
||||
for ; i < int(xx.memIdx); i++ {
|
||||
h ^= uint64(in[i]) * prime64x5
|
||||
h = rotl64_11(h) * prime64x1
|
||||
}
|
||||
}
|
||||
|
||||
return mix64(h)
|
||||
}
|
||||
238
vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go
generated
vendored
Normal file
238
vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go
generated
vendored
Normal file
@@ -0,0 +1,238 @@
|
||||
// +build !safe
|
||||
// +build !appengine
|
||||
// +build !ppc64le
|
||||
// +build !mipsle
|
||||
// +build !ppc64be
|
||||
// +build !mipsbe
|
||||
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Backend returns the current version of xxhash being used.
|
||||
const Backend = "GoUnsafe"
|
||||
|
||||
// ChecksumString32S returns the checksum of the input data, without creating a copy, with the specific seed.
|
||||
func ChecksumString32S(s string, seed uint32) uint32 {
|
||||
if len(s) == 0 {
|
||||
return Checksum32S(nil, seed)
|
||||
}
|
||||
ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
|
||||
return Checksum32S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed)
|
||||
}
|
||||
|
||||
func (xx *XXHash32) WriteString(s string) (int, error) {
|
||||
if len(s) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
|
||||
return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)])
|
||||
}
|
||||
|
||||
// ChecksumString64S returns the checksum of the input data, without creating a copy, with the specific seed.
|
||||
func ChecksumString64S(s string, seed uint64) uint64 {
|
||||
if len(s) == 0 {
|
||||
return Checksum64S(nil, seed)
|
||||
}
|
||||
|
||||
ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
|
||||
return Checksum64S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed)
|
||||
}
|
||||
|
||||
func (xx *XXHash64) WriteString(s string) (int, error) {
|
||||
if len(s) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
|
||||
return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s)])
|
||||
}
|
||||
|
||||
func checksum64(in []byte, seed uint64) uint64 {
|
||||
var (
|
||||
wordsLen = len(in) >> 3
|
||||
words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
|
||||
|
||||
h uint64 = prime64x5
|
||||
|
||||
v1, v2, v3, v4 = resetVs64(seed)
|
||||
|
||||
i int
|
||||
)
|
||||
|
||||
for ; i < len(words)-3; i += 4 {
|
||||
words := (*[4]uint64)(unsafe.Pointer(&words[i]))
|
||||
|
||||
v1 = round64(v1, words[0])
|
||||
v2 = round64(v2, words[1])
|
||||
v3 = round64(v3, words[2])
|
||||
v4 = round64(v4, words[3])
|
||||
}
|
||||
|
||||
h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
|
||||
|
||||
h = mergeRound64(h, v1)
|
||||
h = mergeRound64(h, v2)
|
||||
h = mergeRound64(h, v3)
|
||||
h = mergeRound64(h, v4)
|
||||
|
||||
h += uint64(len(in))
|
||||
|
||||
for _, k := range words[i:] {
|
||||
h ^= round64(0, k)
|
||||
h = rotl64_27(h)*prime64x1 + prime64x4
|
||||
}
|
||||
|
||||
if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 {
|
||||
words := (*[1]uint32)(unsafe.Pointer(&in[0]))
|
||||
h ^= uint64(words[0]) * prime64x1
|
||||
h = rotl64_23(h)*prime64x2 + prime64x3
|
||||
|
||||
in = in[4:len(in):len(in)]
|
||||
}
|
||||
|
||||
for _, b := range in {
|
||||
h ^= uint64(b) * prime64x5
|
||||
h = rotl64_11(h) * prime64x1
|
||||
}
|
||||
|
||||
return mix64(h)
|
||||
}
|
||||
|
||||
func checksum64Short(in []byte, seed uint64) uint64 {
|
||||
var (
|
||||
h = seed + prime64x5 + uint64(len(in))
|
||||
i int
|
||||
)
|
||||
|
||||
if len(in) > 7 {
|
||||
var (
|
||||
wordsLen = len(in) >> 3
|
||||
words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
|
||||
)
|
||||
|
||||
for i := range words {
|
||||
h ^= round64(0, words[i])
|
||||
h = rotl64_27(h)*prime64x1 + prime64x4
|
||||
}
|
||||
|
||||
i = wordsLen << 3
|
||||
}
|
||||
|
||||
if in = in[i:len(in):len(in)]; len(in) > 3 {
|
||||
words := (*[1]uint32)(unsafe.Pointer(&in[0]))
|
||||
h ^= uint64(words[0]) * prime64x1
|
||||
h = rotl64_23(h)*prime64x2 + prime64x3
|
||||
|
||||
in = in[4:len(in):len(in)]
|
||||
}
|
||||
|
||||
for _, b := range in {
|
||||
h ^= uint64(b) * prime64x5
|
||||
h = rotl64_11(h) * prime64x1
|
||||
}
|
||||
|
||||
return mix64(h)
|
||||
}
|
||||
|
||||
func (xx *XXHash64) Write(in []byte) (n int, err error) {
|
||||
mem, idx := xx.mem[:], int(xx.memIdx)
|
||||
|
||||
xx.ln, n = xx.ln+uint64(len(in)), len(in)
|
||||
|
||||
if idx+len(in) < 32 {
|
||||
xx.memIdx += int8(copy(mem[idx:len(mem):len(mem)], in))
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
v1, v2, v3, v4 = xx.v1, xx.v2, xx.v3, xx.v4
|
||||
|
||||
i int
|
||||
)
|
||||
|
||||
if d := 32 - int(idx); d > 0 && int(idx)+len(in) > 31 {
|
||||
copy(mem[idx:len(mem):len(mem)], in[:len(in):len(in)])
|
||||
|
||||
words := (*[4]uint64)(unsafe.Pointer(&mem[0]))
|
||||
|
||||
v1 = round64(v1, words[0])
|
||||
v2 = round64(v2, words[1])
|
||||
v3 = round64(v3, words[2])
|
||||
v4 = round64(v4, words[3])
|
||||
|
||||
if in, xx.memIdx = in[d:len(in):len(in)], 0; len(in) == 0 {
|
||||
goto RET
|
||||
}
|
||||
}
|
||||
|
||||
for ; i < len(in)-31; i += 32 {
|
||||
words := (*[4]uint64)(unsafe.Pointer(&in[i]))
|
||||
|
||||
v1 = round64(v1, words[0])
|
||||
v2 = round64(v2, words[1])
|
||||
v3 = round64(v3, words[2])
|
||||
v4 = round64(v4, words[3])
|
||||
}
|
||||
|
||||
if len(in)-i != 0 {
|
||||
xx.memIdx += int8(copy(mem[xx.memIdx:len(mem):len(mem)], in[i:len(in):len(in)]))
|
||||
}
|
||||
|
||||
RET:
|
||||
xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (xx *XXHash64) Sum64() (h uint64) {
|
||||
if seed := xx.seed; xx.ln > 31 {
|
||||
v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4
|
||||
h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
|
||||
|
||||
h = mergeRound64(h, v1)
|
||||
h = mergeRound64(h, v2)
|
||||
h = mergeRound64(h, v3)
|
||||
h = mergeRound64(h, v4)
|
||||
} else if seed == 0 {
|
||||
h = prime64x5
|
||||
} else {
|
||||
h = seed + prime64x5
|
||||
}
|
||||
|
||||
h += uint64(xx.ln)
|
||||
|
||||
if xx.memIdx == 0 {
|
||||
return mix64(h)
|
||||
}
|
||||
|
||||
var (
|
||||
in = xx.mem[:xx.memIdx:xx.memIdx]
|
||||
wordsLen = len(in) >> 3
|
||||
words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
|
||||
)
|
||||
|
||||
for _, k := range words {
|
||||
h ^= round64(0, k)
|
||||
h = rotl64_27(h)*prime64x1 + prime64x4
|
||||
}
|
||||
|
||||
if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 {
|
||||
words := (*[1]uint32)(unsafe.Pointer(&in[0]))
|
||||
|
||||
h ^= uint64(words[0]) * prime64x1
|
||||
h = rotl64_23(h)*prime64x2 + prime64x3
|
||||
|
||||
in = in[4:len(in):len(in)]
|
||||
}
|
||||
|
||||
for _, b := range in {
|
||||
h ^= uint64(b) * prime64x5
|
||||
h = rotl64_11(h) * prime64x1
|
||||
}
|
||||
|
||||
return mix64(h)
|
||||
}
|
||||
294
vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go
generated
vendored
Normal file
294
vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go
generated
vendored
Normal file
@@ -0,0 +1,294 @@
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"html/template"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/globalsign/mgo/internal/json"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(0)
|
||||
log.SetPrefix(name + ": ")
|
||||
|
||||
var g Generator
|
||||
|
||||
fmt.Fprintf(&g, "// Code generated by \"%s.go\"; DO NOT EDIT\n\n", name)
|
||||
|
||||
src := g.generate()
|
||||
|
||||
err := ioutil.WriteFile(fmt.Sprintf("%s.go", strings.TrimSuffix(name, "_generator")), src, 0644)
|
||||
if err != nil {
|
||||
log.Fatalf("writing output: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Generator holds the state of the analysis. Primarily used to buffer
|
||||
// the output for format.Source.
|
||||
type Generator struct {
|
||||
bytes.Buffer // Accumulated output.
|
||||
}
|
||||
|
||||
// format returns the gofmt-ed contents of the Generator's buffer.
|
||||
func (g *Generator) format() []byte {
|
||||
src, err := format.Source(g.Bytes())
|
||||
if err != nil {
|
||||
// Should never happen, but can arise when developing this code.
|
||||
// The user can compile the output to see the error.
|
||||
log.Printf("warning: internal error: invalid Go generated: %s", err)
|
||||
log.Printf("warning: compile the package to analyze the error")
|
||||
return g.Bytes()
|
||||
}
|
||||
return src
|
||||
}
|
||||
|
||||
// EVERYTHING ABOVE IS CONSTANT BETWEEN THE GENERATORS
|
||||
|
||||
const name = "bson_corpus_spec_test_generator"
|
||||
|
||||
func (g *Generator) generate() []byte {
|
||||
|
||||
testFiles, err := filepath.Glob("./specdata/specifications/source/bson-corpus/tests/*.json")
|
||||
if err != nil {
|
||||
log.Fatalf("error reading bson-corpus files: %s", err)
|
||||
}
|
||||
|
||||
tests, err := g.loadTests(testFiles)
|
||||
if err != nil {
|
||||
log.Fatalf("error loading tests: %s", err)
|
||||
}
|
||||
|
||||
tmpl, err := g.getTemplate()
|
||||
if err != nil {
|
||||
log.Fatalf("error loading template: %s", err)
|
||||
}
|
||||
|
||||
tmpl.Execute(&g.Buffer, tests)
|
||||
|
||||
return g.format()
|
||||
}
|
||||
|
||||
func (g *Generator) loadTests(filenames []string) ([]*testDef, error) {
|
||||
var tests []*testDef
|
||||
for _, filename := range filenames {
|
||||
test, err := g.loadTest(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tests = append(tests, test)
|
||||
}
|
||||
|
||||
return tests, nil
|
||||
}
|
||||
|
||||
func (g *Generator) loadTest(filename string) (*testDef, error) {
|
||||
content, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var testDef testDef
|
||||
err = json.Unmarshal(content, &testDef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
names := make(map[string]struct{})
|
||||
|
||||
for i := len(testDef.Valid) - 1; i >= 0; i-- {
|
||||
if testDef.BsonType == "0x05" && testDef.Valid[i].Description == "subtype 0x02" {
|
||||
testDef.Valid = append(testDef.Valid[:i], testDef.Valid[i+1:]...)
|
||||
continue
|
||||
}
|
||||
|
||||
name := cleanupFuncName(testDef.Description + "_" + testDef.Valid[i].Description)
|
||||
nameIdx := name
|
||||
j := 1
|
||||
for {
|
||||
if _, ok := names[nameIdx]; !ok {
|
||||
break
|
||||
}
|
||||
|
||||
nameIdx = fmt.Sprintf("%s_%d", name, j)
|
||||
}
|
||||
|
||||
names[nameIdx] = struct{}{}
|
||||
|
||||
testDef.Valid[i].TestDef = &testDef
|
||||
testDef.Valid[i].Name = nameIdx
|
||||
testDef.Valid[i].StructTest = testDef.TestKey != "" &&
|
||||
(testDef.BsonType != "0x05" || strings.Contains(testDef.Valid[i].Description, "0x00")) &&
|
||||
!testDef.Deprecated
|
||||
}
|
||||
|
||||
for i := len(testDef.DecodeErrors) - 1; i >= 0; i-- {
|
||||
if strings.Contains(testDef.DecodeErrors[i].Description, "UTF-8") {
|
||||
testDef.DecodeErrors = append(testDef.DecodeErrors[:i], testDef.DecodeErrors[i+1:]...)
|
||||
continue
|
||||
}
|
||||
|
||||
name := cleanupFuncName(testDef.Description + "_" + testDef.DecodeErrors[i].Description)
|
||||
nameIdx := name
|
||||
j := 1
|
||||
for {
|
||||
if _, ok := names[nameIdx]; !ok {
|
||||
break
|
||||
}
|
||||
|
||||
nameIdx = fmt.Sprintf("%s_%d", name, j)
|
||||
}
|
||||
names[nameIdx] = struct{}{}
|
||||
|
||||
testDef.DecodeErrors[i].Name = nameIdx
|
||||
}
|
||||
|
||||
return &testDef, nil
|
||||
}
|
||||
|
||||
// getTemplate builds the text/template used to render the generated
// bson_test source file. The template ranges over a slice of testDef
// values: every Valid case becomes a round-trip test (plus an optional
// struct-decoding test when StructTest is set), and every DecodeErrors
// case becomes a test asserting that bson.Unmarshal fails.
func (g *Generator) getTemplate() (*template.Template, error) {
	// The template body is the complete generated file, including its own
	// imports and the shared helpers invoked by each emitted test case.
	content := `package bson_test

import (
	"encoding/hex"
	"time"

	. "gopkg.in/check.v1"
	"github.com/globalsign/mgo/bson"
)

func testValid(c *C, in []byte, expected []byte, result interface{}) {
	err := bson.Unmarshal(in, result)
	c.Assert(err, IsNil)

	out, err := bson.Marshal(result)
	c.Assert(err, IsNil)

	c.Assert(string(expected), Equals, string(out), Commentf("roundtrip failed for %T, expected '%x' but got '%x'", result, expected, out))
}

func testDecodeSkip(c *C, in []byte) {
	err := bson.Unmarshal(in, &struct{}{})
	c.Assert(err, IsNil)
}

func testDecodeError(c *C, in []byte, result interface{}) {
	err := bson.Unmarshal(in, result)
	c.Assert(err, Not(IsNil))
}

{{range .}}
{{range .Valid}}
func (s *S) Test{{.Name}}(c *C) {
	b, err := hex.DecodeString("{{.Bson}}")
	c.Assert(err, IsNil)

	{{if .CanonicalBson}}
	cb, err := hex.DecodeString("{{.CanonicalBson}}")
	c.Assert(err, IsNil)
	{{else}}
	cb := b
	{{end}}

	var resultD bson.D
	testValid(c, b, cb, &resultD)
	{{if .StructTest}}var resultS struct {
		Element {{.TestDef.GoType}} ` + "`bson:\"{{.TestDef.TestKey}}\"`" + `
	}
	testValid(c, b, cb, &resultS){{end}}

	testDecodeSkip(c, b)
}
{{end}}

{{range .DecodeErrors}}
func (s *S) Test{{.Name}}(c *C) {
	b, err := hex.DecodeString("{{.Bson}}")
	c.Assert(err, IsNil)

	var resultD bson.D
	testDecodeError(c, b, &resultD)
}
{{end}}
{{end}}
`
	tmpl, err := template.New("").Parse(content)
	if err != nil {
		return nil, err
	}
	return tmpl, nil
}
|
||||
|
||||
// cleanupFuncName converts an arbitrary test description into a valid Go
// identifier fragment: ASCII letters and digits pass through unchanged,
// every other rune becomes an underscore.
func cleanupFuncName(name string) string {
	return strings.Map(func(r rune) rune {
		// Spell the accepted ranges as rune literals rather than raw code
		// points (48-57, 65-90, 97-122) so the intent is obvious.
		if (r >= '0' && r <= '9') || (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') {
			return r
		}
		return '_'
	}, name)
}
|
||||
|
||||
// testDef mirrors one BSON corpus JSON document: the description of a BSON
// element type together with its valid round-trip cases and the inputs that
// are expected to fail decoding.
type testDef struct {
	Description  string         `json:"description"`
	BsonType     string         `json:"bson_type"`
	TestKey      string         `json:"test_key"`
	Valid        []*valid       `json:"valid"`
	DecodeErrors []*decodeError `json:"decodeErrors"`
	Deprecated   bool           `json:"deprecated"`
}

// GoType maps the corpus BSON element-type byte (a "0xNN" string) onto the
// Go type used for the generated struct-decoding tests; unknown types fall
// back to interface{}.
func (t *testDef) GoType() string {
	types := map[string]string{
		"0x01": "float64",
		"0x02": "string",
		"0x03": "bson.D",
		"0x04": "[]interface{}",
		"0x05": "[]byte",
		"0x07": "bson.ObjectId",
		"0x08": "bool",
		"0x09": "time.Time",
		"0x0E": "string",
		"0x10": "int32",
		"0x12": "int64",
		"0x13": "bson.Decimal",
	}
	if goType, ok := types[t.BsonType]; ok {
		return goType
	}
	return "interface{}"
}

// valid is one round-trip test case from the corpus, plus the fields the
// generator fills in while building the template context.
type valid struct {
	Description   string `json:"description"`
	Bson          string `json:"bson"`
	CanonicalBson string `json:"canonical_bson"`

	Name       string   // generated unique test name
	StructTest bool     // whether a struct-decoding variant is emitted
	TestDef    *testDef // back-reference to the owning definition
}

// decodeError is one expected-failure case from the corpus, plus the
// generated unique test name.
type decodeError struct {
	Description string `json:"description"`
	Bson        string `json:"bson"`

	Name string
}
|
||||
8
vendor/github.com/gobwas/glob/.gitignore
generated
vendored
Normal file
8
vendor/github.com/gobwas/glob/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
glob.iml
|
||||
.idea
|
||||
*.cpu
|
||||
*.mem
|
||||
*.test
|
||||
*.dot
|
||||
*.png
|
||||
*.svg
|
||||
9
vendor/github.com/gobwas/glob/.travis.yml
generated
vendored
Normal file
9
vendor/github.com/gobwas/glob/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
sudo: false
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.5.3
|
||||
|
||||
script:
|
||||
- go test -v ./...
|
||||
21
vendor/github.com/gobwas/glob/LICENSE
generated
vendored
Normal file
21
vendor/github.com/gobwas/glob/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Sergey Kamardin
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
26
vendor/github.com/gobwas/glob/bench.sh
generated
vendored
Normal file
26
vendor/github.com/gobwas/glob/bench.sh
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
#! /bin/bash
|
||||
|
||||
# bench REF PATTERN
# Runs `go test -bench` for benchmark PATTERN at git ref REF and stores the
# output under /tmp/REF-PATTERN.bench, reusing a previous run when present.
# NOTE: the original text was garbled to "$(unknown)" by extraction; the
# intended variable is clearly ${filename} defined on the first line.
bench() {
    filename="/tmp/$1-$2.bench"
    if test -e "${filename}";
    then
        echo "Already exists ${filename}"
    else
        # remember the current branch so we can come back to it
        backup=`git rev-parse --abbrev-ref HEAD`
        git checkout $1
        echo -n "Creating ${filename}... "
        go test ./... -run=NONE -bench=$2 > "${filename}" -benchmem
        echo "OK"
        git checkout ${backup}
        sleep 5
    fi
}
|
||||
|
||||
|
||||
# Entry point: $1 is the git ref to compare against, $2 the benchmark name
# pattern, $3 presumably extra flags for benchcmp — TODO confirm.
to=$1
current=`git rev-parse --abbrev-ref HEAD`

# Produce (or reuse) benchmark output for both revisions.
bench ${to} $2
bench ${current} $2

# Report the delta between the two runs.
benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench"
|
||||
525
vendor/github.com/gobwas/glob/compiler/compiler.go
generated
vendored
Normal file
525
vendor/github.com/gobwas/glob/compiler/compiler.go
generated
vendored
Normal file
@@ -0,0 +1,525 @@
|
||||
package compiler
|
||||
|
||||
// TODO use constructor with all matchers, and to their structs private
|
||||
// TODO glue multiple Text nodes (like after QuoteMeta)
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/gobwas/glob/match"
|
||||
"github.com/gobwas/glob/syntax/ast"
|
||||
"github.com/gobwas/glob/util/runes"
|
||||
)
|
||||
|
||||
// optimizeMatcher rewrites a single matcher into a cheaper equivalent where
// one exists; matchers with no known shortcut are returned unchanged.
func optimizeMatcher(matcher match.Matcher) match.Matcher {
	switch m := matcher.(type) {

	case match.Any:
		// `*` with no separators behaves exactly like `**`.
		if len(m.Separators) == 0 {
			return match.NewSuper()
		}

	case match.AnyOf:
		// A single-alternative AnyOf is just that alternative.
		if len(m.Matchers) == 1 {
			return m.Matchers[0]
		}

		return m

	case match.List:
		// A positive one-rune character class matches exactly that rune.
		if m.Not == false && len(m.List) == 1 {
			return match.NewText(string(m.List))
		}

		return m

	case match.BTree:
		m.Left = optimizeMatcher(m.Left)
		m.Right = optimizeMatcher(m.Right)

		// The collapses below only apply when the pivot is plain text.
		r, ok := m.Value.(match.Text)
		if !ok {
			return m
		}

		var (
			leftNil  = m.Left == nil
			rightNil = m.Right == nil
		)
		if leftNil && rightNil {
			return match.NewText(r.Str)
		}

		_, leftSuper := m.Left.(match.Super)
		lp, leftPrefix := m.Left.(match.Prefix)
		la, leftAny := m.Left.(match.Any)

		_, rightSuper := m.Right.(match.Super)
		rs, rightSuffix := m.Right.(match.Suffix)
		ra, rightAny := m.Right.(match.Any)

		// Fold the common `**text**`, `**text`, `text**`, ... shapes into
		// purpose-built matchers that avoid the generic tree walk.
		switch {
		case leftSuper && rightSuper:
			return match.NewContains(r.Str, false)

		case leftSuper && rightNil:
			return match.NewSuffix(r.Str)

		case rightSuper && leftNil:
			return match.NewPrefix(r.Str)

		case leftNil && rightSuffix:
			return match.NewPrefixSuffix(r.Str, rs.Suffix)

		case rightNil && leftPrefix:
			return match.NewPrefixSuffix(lp.Prefix, r.Str)

		case rightNil && leftAny:
			return match.NewSuffixAny(r.Str, la.Separators)

		case leftNil && rightAny:
			return match.NewPrefixAny(r.Str, ra.Separators)
		}

		return m
	}

	return matcher
}
|
||||
|
||||
// compileMatchers combines a sequence of matchers into a single matcher.
// It first tries to glue the whole run into one fused matcher; failing
// that, it picks the rightmost longest fixed-length matcher as the pivot
// of a binary tree and recursively compiles the runs on either side.
func compileMatchers(matchers []match.Matcher) (match.Matcher, error) {
	if len(matchers) == 0 {
		return nil, fmt.Errorf("compile error: need at least one matcher")
	}
	if len(matchers) == 1 {
		return matchers[0], nil
	}
	if m := glueMatchers(matchers); m != nil {
		return m, nil
	}

	idx := -1
	maxLen := -1
	var val match.Matcher
	// Select the pivot: the last matcher whose static length is known and
	// maximal (>= keeps the rightmost of equals).
	for i, matcher := range matchers {
		if l := matcher.Len(); l != -1 && l >= maxLen {
			maxLen = l
			idx = i
			val = matcher
		}
	}

	if val == nil { // not found matcher with static length
		r, err := compileMatchers(matchers[1:])
		if err != nil {
			return nil, err
		}
		return match.NewBTree(matchers[0], nil, r), nil
	}

	left := matchers[:idx]
	var right []match.Matcher
	if len(matchers) > idx+1 {
		right = matchers[idx+1:]
	}

	var l, r match.Matcher
	var err error
	if len(left) > 0 {
		l, err = compileMatchers(left)
		if err != nil {
			return nil, err
		}
	}

	if len(right) > 0 {
		r, err = compileMatchers(right)
		if err != nil {
			return nil, err
		}
	}

	return match.NewBTree(val, l, r), nil
}
|
||||
|
||||
func glueMatchers(matchers []match.Matcher) match.Matcher {
|
||||
if m := glueMatchersAsEvery(matchers); m != nil {
|
||||
return m
|
||||
}
|
||||
if m := glueMatchersAsRow(matchers); m != nil {
|
||||
return m
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func glueMatchersAsRow(matchers []match.Matcher) match.Matcher {
|
||||
if len(matchers) <= 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
c []match.Matcher
|
||||
l int
|
||||
)
|
||||
for _, matcher := range matchers {
|
||||
if ml := matcher.Len(); ml == -1 {
|
||||
return nil
|
||||
} else {
|
||||
c = append(c, matcher)
|
||||
l += ml
|
||||
}
|
||||
}
|
||||
return match.NewRow(l, c...)
|
||||
}
|
||||
|
||||
// glueMatchersAsEvery fuses a run of wildcard-like matchers (`**`, `*`,
// `?`, and negated character classes) into one combined matcher, provided
// they all agree on the same separator set. It returns nil when the run
// contains any other matcher kind or the separator sets differ.
func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher {
	if len(matchers) <= 1 {
		return nil
	}

	var (
		hasAny    bool
		hasSuper  bool
		hasSingle bool
		min       int
		separator []rune
	)

	for i, matcher := range matchers {
		var sep []rune

		switch m := matcher.(type) {
		case match.Super:
			sep = []rune{}
			hasSuper = true

		case match.Any:
			sep = m.Separators
			hasAny = true

		case match.Single:
			sep = m.Separators
			hasSingle = true
			min++

		case match.List:
			// Only a negated list behaves like a single-rune wildcard.
			if !m.Not {
				return nil
			}
			sep = m.List
			hasSingle = true
			min++

		default:
			return nil
		}

		// initialize
		if i == 0 {
			separator = sep
		}

		if runes.Equal(sep, separator) {
			continue
		}

		// Mixed separator sets cannot be fused.
		return nil
	}

	if hasSuper && !hasAny && !hasSingle {
		return match.NewSuper()
	}

	if hasAny && !hasSuper && !hasSingle {
		return match.NewAny(separator)
	}

	if (hasAny || hasSuper) && min > 0 && len(separator) == 0 {
		return match.NewMin(min)
	}

	every := match.NewEveryOf()

	if min > 0 {
		every.Add(match.NewMin(min))

		// Without a variable-length member the total length is exact.
		if !hasAny && !hasSuper {
			every.Add(match.NewMax(min))
		}
	}

	if len(separator) > 0 {
		every.Add(match.NewContains(string(separator), true))
	}

	return every
}
|
||||
|
||||
// minimizeMatchers repeatedly searches for the best glueable sub-run of
// matchers, replaces it with its fused form, and recurses until no further
// fusion shrinks the slice.
func minimizeMatchers(matchers []match.Matcher) []match.Matcher {
	var done match.Matcher
	var left, right, count int

	for l := 0; l < len(matchers); l++ {
		for r := len(matchers); r > l; r-- {
			if glued := glueMatchers(matchers[l:r]); glued != nil {
				var swap bool

				if done == nil {
					swap = true
				} else {
					// Prefer the candidate with the greater static length,
					// or else the one covering more source matchers.
					cl, gl := done.Len(), glued.Len()
					swap = cl > -1 && gl > -1 && gl > cl
					swap = swap || count < r-l
				}

				if swap {
					done = glued
					left = l
					right = r
					count = r - l
				}
			}
		}
	}

	if done == nil {
		return matchers
	}

	// Splice the fused matcher in place of the [left:right) run.
	next := append(append([]match.Matcher{}, matchers[:left]...), done)
	if right < len(matchers) {
		next = append(next, matchers[right:]...)
	}

	if len(next) == len(matchers) {
		// Nothing shrank; stop here to avoid infinite recursion.
		return next
	}

	return minimizeMatchers(next)
}
|
||||
|
||||
// minimizeAnyOf tries to apply some heuristics to minimize number of nodes in given tree
|
||||
func minimizeTree(tree *ast.Node) *ast.Node {
|
||||
switch tree.Kind {
|
||||
case ast.KindAnyOf:
|
||||
return minimizeTreeAnyOf(tree)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// minimizeTreeAnyOf tries to find common children of given node of AnyOf pattern
// it searches for common children from left and from right
// if any common children are found – then it returns new optimized ast tree
// else it returns nil
func minimizeTreeAnyOf(tree *ast.Node) *ast.Node {
	if !areOfSameKind(tree.Children, ast.KindPattern) {
		return nil
	}

	commonLeft, commonRight := commonChildren(tree.Children)
	commonLeftCount, commonRightCount := len(commonLeft), len(commonRight)
	if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts
		return nil
	}

	var result []*ast.Node
	if commonLeftCount > 0 {
		result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...))
	}

	// Rebuild each alternative without the factored-out common parts,
	// de-duplicating identical remainders.
	var anyOf []*ast.Node
	for _, child := range tree.Children {
		reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount]
		var node *ast.Node
		if len(reuse) == 0 {
			// this pattern is completely reduced by commonLeft and commonRight patterns
			// so it become nothing
			node = ast.NewNode(ast.KindNothing, nil)
		} else {
			node = ast.NewNode(ast.KindPattern, nil, reuse...)
		}
		anyOf = appendIfUnique(anyOf, node)
	}
	switch {
	case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing:
		// A single non-empty remainder collapses into a plain pattern part.
		result = append(result, anyOf[0])
	case len(anyOf) > 1:
		result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...))
	}

	if commonRightCount > 0 {
		result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...))
	}

	return ast.NewNode(ast.KindPattern, nil, result...)
}
|
||||
|
||||
// commonChildren finds the longest run of identical leading children
// (commonLeft) and identical trailing children (commonRight) shared by
// every node, so the caller can factor them out of an alternation. Both
// results are empty when the nodes share nothing on that side.
func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) {
	if len(nodes) <= 1 {
		return
	}

	// find node that has least number of children
	idx := leastChildren(nodes)
	if idx == -1 {
		return
	}
	tree := nodes[idx]
	treeLength := len(tree.Children)

	// allocate max able size for rightCommon slice
	// to get ability insert elements in reverse order (from end to start)
	// without sorting
	commonRight = make([]*ast.Node, treeLength)
	lastRight := treeLength // will use this to get results as commonRight[lastRight:]

	var (
		breakLeft   bool
		breakRight  bool
		commonTotal int
	)
	// Scan inward from both ends of the shortest node simultaneously,
	// comparing against the corresponding child of every other node.
	for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 {
		treeLeft := tree.Children[i]
		treeRight := tree.Children[j]

		for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ {
			// skip least children node
			if k == idx {
				continue
			}

			restLeft := nodes[k].Children[i]
			restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength]

			breakLeft = breakLeft || !treeLeft.Equal(restLeft)

			// disable searching for right common parts, if left part is already overlapping
			breakRight = breakRight || (!breakLeft && j <= i)
			breakRight = breakRight || !treeRight.Equal(restRight)
		}

		if !breakLeft {
			commonTotal++
			commonLeft = append(commonLeft, treeLeft)
		}
		if !breakRight {
			commonTotal++
			lastRight = j
			commonRight[j] = treeRight
		}
	}

	commonRight = commonRight[lastRight:]

	return
}
|
||||
|
||||
func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node {
|
||||
for _, n := range target {
|
||||
if reflect.DeepEqual(n, val) {
|
||||
return target
|
||||
}
|
||||
}
|
||||
return append(target, val)
|
||||
}
|
||||
|
||||
func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool {
|
||||
for _, n := range nodes {
|
||||
if n.Kind != kind {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func leastChildren(nodes []*ast.Node) int {
|
||||
min := -1
|
||||
idx := -1
|
||||
for i, n := range nodes {
|
||||
if idx == -1 || (len(n.Children) < min) {
|
||||
min = len(n.Children)
|
||||
idx = i
|
||||
}
|
||||
}
|
||||
return idx
|
||||
}
|
||||
|
||||
func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) {
|
||||
var matchers []match.Matcher
|
||||
for _, desc := range tree.Children {
|
||||
m, err := compile(desc, sep)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
matchers = append(matchers, optimizeMatcher(m))
|
||||
}
|
||||
return matchers, nil
|
||||
}
|
||||
|
||||
// compile lowers an AST node into a Matcher, recursing through pattern and
// alternation nodes and mapping each leaf kind onto its concrete matcher
// type. The result is passed through optimizeMatcher before returning.
func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) {
	switch tree.Kind {
	case ast.KindAnyOf:
		// todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go)
		if n := minimizeTree(tree); n != nil {
			return compile(n, sep)
		}
		matchers, err := compileTreeChildren(tree, sep)
		if err != nil {
			return nil, err
		}
		return match.NewAnyOf(matchers...), nil

	case ast.KindPattern:
		if len(tree.Children) == 0 {
			return match.NewNothing(), nil
		}
		matchers, err := compileTreeChildren(tree, sep)
		if err != nil {
			return nil, err
		}
		m, err = compileMatchers(minimizeMatchers(matchers))
		if err != nil {
			return nil, err
		}

	case ast.KindAny:
		m = match.NewAny(sep)

	case ast.KindSuper:
		m = match.NewSuper()

	case ast.KindSingle:
		m = match.NewSingle(sep)

	case ast.KindNothing:
		m = match.NewNothing()

	case ast.KindList:
		l := tree.Value.(ast.List)
		m = match.NewList([]rune(l.Chars), l.Not)

	case ast.KindRange:
		r := tree.Value.(ast.Range)
		m = match.NewRange(r.Lo, r.Hi, r.Not)

	case ast.KindText:
		t := tree.Value.(ast.Text)
		m = match.NewText(t.Text)

	default:
		return nil, fmt.Errorf("could not compile tree: unknown node type")
	}

	return optimizeMatcher(m), nil
}
|
||||
|
||||
func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) {
|
||||
m, err := compile(tree, sep)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
80
vendor/github.com/gobwas/glob/glob.go
generated
vendored
Normal file
80
vendor/github.com/gobwas/glob/glob.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
package glob
|
||||
|
||||
import (
|
||||
"github.com/gobwas/glob/compiler"
|
||||
"github.com/gobwas/glob/syntax"
|
||||
)
|
||||
|
||||
// Glob represents compiled glob pattern.
type Glob interface {
	// Match reports whether the given string satisfies the pattern.
	Match(string) bool
}
|
||||
|
||||
// Compile creates Glob for given pattern and strings (if any present after pattern) as separators.
|
||||
// The pattern syntax is:
|
||||
//
|
||||
// pattern:
|
||||
// { term }
|
||||
//
|
||||
// term:
|
||||
// `*` matches any sequence of non-separator characters
|
||||
// `**` matches any sequence of characters
|
||||
// `?` matches any single non-separator character
|
||||
// `[` [ `!` ] { character-range } `]`
|
||||
// character class (must be non-empty)
|
||||
// `{` pattern-list `}`
|
||||
// pattern alternatives
|
||||
// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
|
||||
// `\` c matches character c
|
||||
//
|
||||
// character-range:
|
||||
// c matches character c (c != `\\`, `-`, `]`)
|
||||
// `\` c matches character c
|
||||
// lo `-` hi matches character c for lo <= c <= hi
|
||||
//
|
||||
// pattern-list:
|
||||
// pattern { `,` pattern }
|
||||
// comma-separated (without spaces) patterns
|
||||
//
|
||||
func Compile(pattern string, separators ...rune) (Glob, error) {
|
||||
ast, err := syntax.Parse(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
matcher, err := compiler.Compile(ast, separators)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return matcher, nil
|
||||
}
|
||||
|
||||
// MustCompile is the same as Compile, except that if Compile returns error, this will panic
|
||||
func MustCompile(pattern string, separators ...rune) Glob {
|
||||
g, err := Compile(pattern, separators...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return g
|
||||
}
|
||||
|
||||
// QuoteMeta returns a string that quotes all glob pattern meta characters
// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\{foo\*\}`.
func QuoteMeta(s string) string {
	// Worst case: every byte is special and gets a backslash prefix.
	b := make([]byte, 2*len(s))

	// a byte loop is correct because all meta characters are ASCII
	j := 0
	for i := 0; i < len(s); i++ {
		if syntax.Special(s[i]) {
			b[j] = '\\'
			j++
		}
		b[j] = s[i]
		j++
	}

	return string(b[0:j])
}
|
||||
45
vendor/github.com/gobwas/glob/match/any.go
generated
vendored
Normal file
45
vendor/github.com/gobwas/glob/match/any.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/gobwas/glob/util/strings"
|
||||
)
|
||||
|
||||
type Any struct {
|
||||
Separators []rune
|
||||
}
|
||||
|
||||
func NewAny(s []rune) Any {
|
||||
return Any{s}
|
||||
}
|
||||
|
||||
func (self Any) Match(s string) bool {
|
||||
return strings.IndexAnyRunes(s, self.Separators) == -1
|
||||
}
|
||||
|
||||
func (self Any) Index(s string) (int, []int) {
|
||||
found := strings.IndexAnyRunes(s, self.Separators)
|
||||
switch found {
|
||||
case -1:
|
||||
case 0:
|
||||
return 0, segments0
|
||||
default:
|
||||
s = s[:found]
|
||||
}
|
||||
|
||||
segments := acquireSegments(len(s))
|
||||
for i := range s {
|
||||
segments = append(segments, i)
|
||||
}
|
||||
segments = append(segments, len(s))
|
||||
|
||||
return 0, segments
|
||||
}
|
||||
|
||||
func (self Any) Len() int {
|
||||
return lenNo
|
||||
}
|
||||
|
||||
func (self Any) String() string {
|
||||
return fmt.Sprintf("<any:![%s]>", string(self.Separators))
|
||||
}
|
||||
82
vendor/github.com/gobwas/glob/match/any_of.go
generated
vendored
Normal file
82
vendor/github.com/gobwas/glob/match/any_of.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
package match
|
||||
|
||||
import "fmt"
|
||||
|
||||
type AnyOf struct {
|
||||
Matchers Matchers
|
||||
}
|
||||
|
||||
func NewAnyOf(m ...Matcher) AnyOf {
|
||||
return AnyOf{Matchers(m)}
|
||||
}
|
||||
|
||||
func (self *AnyOf) Add(m Matcher) error {
|
||||
self.Matchers = append(self.Matchers, m)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self AnyOf) Match(s string) bool {
|
||||
for _, m := range self.Matchers {
|
||||
if m.Match(s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Index returns the smallest offset at which any alternative can match,
// together with the merged set of segment lengths reported by every
// alternative matching at that offset. Returns (-1, nil) when none match.
func (self AnyOf) Index(s string) (int, []int) {
	index := -1

	segments := acquireSegments(len(s))
	for _, m := range self.Matchers {
		idx, seg := m.Index(s)
		if idx == -1 {
			continue
		}

		// A strictly earlier match replaces everything gathered so far.
		if index == -1 || idx < index {
			index = idx
			segments = append(segments[:0], seg...)
			continue
		}

		if idx > index {
			continue
		}

		// here idx == index
		segments = appendMerge(segments, seg)
	}

	if index == -1 {
		releaseSegments(segments)
		return -1, nil
	}

	return index, segments
}

// Len returns the common static length of all alternatives, or -1 when
// they disagree or any alternative has a variable length.
func (self AnyOf) Len() (l int) {
	l = -1
	for _, m := range self.Matchers {
		ml := m.Len()
		switch {
		case l == -1:
			l = ml
			continue

		case ml == -1:
			return -1

		case l != ml:
			return -1
		}
	}

	return
}

// String returns a human-readable description of the matcher.
func (self AnyOf) String() string {
	return fmt.Sprintf("<any_of:[%s]>", self.Matchers)
}
|
||||
146
vendor/github.com/gobwas/glob/match/btree.go
generated
vendored
Normal file
146
vendor/github.com/gobwas/glob/match/btree.go
generated
vendored
Normal file
@@ -0,0 +1,146 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// BTree is a binary-tree matcher node: Value must match somewhere inside
// the input, with Left matching everything before it and Right everything
// after. The *LengthRunes fields cache static lengths (-1 when unknown)
// so Match can prune impossible inputs cheaply.
type BTree struct {
	Value            Matcher
	Left             Matcher
	Right            Matcher
	ValueLengthRunes int
	LeftLengthRunes  int
	RightLengthRunes int
	LengthRunes      int
}

// NewBTree assembles a BTree from a mandatory Value and optional Left and
// Right children, precomputing the combined static length when every
// present part has one.
func NewBTree(Value, Left, Right Matcher) (tree BTree) {
	tree.Value = Value
	tree.Left = Left
	tree.Right = Right

	lenOk := true
	if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 {
		lenOk = false
	}

	if Left != nil {
		if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 {
			lenOk = false
		}
	}

	if Right != nil {
		if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 {
			lenOk = false
		}
	}

	if lenOk {
		tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes
	} else {
		tree.LengthRunes = -1
	}

	return tree
}
|
||||
|
||||
// Len returns the total static length in runes, or -1 when any part of
// the tree has a variable length.
func (self BTree) Len() int {
	return self.LengthRunes
}

// todo?
// Index is not implemented for BTree; it always reports no match.
func (self BTree) Index(s string) (int, []int) {
	return -1, nil
}
|
||||
|
||||
// Match reports whether s can be split into left+value+right parts
// accepted by the respective children. It slides the Value match across
// the admissible window, trying every candidate split.
func (self BTree) Match(s string) bool {
	inputLen := len(s)

	// self.Length, self.RLen and self.LLen are values meaning the length of runes for each part
	// here we manipulating byte length for better optimizations
	// but these checks still works, cause minLen of 1-rune string is 1 byte.
	if self.LengthRunes != -1 && self.LengthRunes > inputLen {
		return false
	}

	// try to cut unnecessary parts
	// by knowledge of length of right and left part
	var offset, limit int
	if self.LeftLengthRunes >= 0 {
		offset = self.LeftLengthRunes
	}
	if self.RightLengthRunes >= 0 {
		limit = inputLen - self.RightLengthRunes
	} else {
		limit = inputLen
	}

	for offset < limit {
		// search for matching part in substring
		index, segments := self.Value.Index(s[offset:limit])
		if index == -1 {
			releaseSegments(segments)
			return false
		}

		l := s[:offset+index]
		var left bool
		if self.Left != nil {
			left = self.Left.Match(l)
		} else {
			// A nil left child only accepts an empty prefix.
			left = l == ""
		}

		if left {
			// Try the longest value segment first.
			for i := len(segments) - 1; i >= 0; i-- {
				length := segments[i]

				var right bool
				var r string
				// if there is no string for the right branch
				if inputLen <= offset+index+length {
					r = ""
				} else {
					r = s[offset+index+length:]
				}

				if self.Right != nil {
					right = self.Right.Match(r)
				} else {
					right = r == ""
				}

				if right {
					releaseSegments(segments)
					return true
				}
			}
		}

		// Advance past the current candidate by one rune and retry.
		_, step := utf8.DecodeRuneInString(s[offset+index:])
		offset += index + step

		releaseSegments(segments)
	}

	return false
}
|
||||
|
||||
func (self BTree) String() string {
|
||||
const n string = "<nil>"
|
||||
var l, r string
|
||||
if self.Left == nil {
|
||||
l = n
|
||||
} else {
|
||||
l = self.Left.String()
|
||||
}
|
||||
if self.Right == nil {
|
||||
r = n
|
||||
} else {
|
||||
r = self.Right.String()
|
||||
}
|
||||
|
||||
return fmt.Sprintf("<btree:[%s<-%s->%s]>", l, self.Value, r)
|
||||
}
|
||||
58
vendor/github.com/gobwas/glob/match/contains.go
generated
vendored
Normal file
58
vendor/github.com/gobwas/glob/match/contains.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Contains matches any string that does (or, when Not is set, does not)
// contain the Needle substring.
type Contains struct {
	Needle string
	Not    bool
}

// NewContains builds a Contains matcher for needle; not inverts the test.
func NewContains(needle string, not bool) Contains {
	return Contains{Needle: needle, Not: not}
}

// Match reports whether s contains the needle, inverted when Not is set.
func (self Contains) Match(s string) bool {
	contains := strings.Contains(s, self.Needle)
	return contains != self.Not
}
|
||||
|
||||
// Index returns the earliest position from which a match can start (always
// 0 on success) together with every admissible segment end. For a positive
// Contains the segment must reach at least past the first needle
// occurrence; for a negated one it must stop before it.
func (self Contains) Index(s string) (int, []int) {
	var offset int

	idx := strings.Index(s, self.Needle)

	if !self.Not {
		if idx == -1 {
			return -1, nil
		}

		offset = idx + len(self.Needle)
		if len(s) <= offset {
			// The needle ends exactly at the end of s: single segment.
			return 0, []int{offset}
		}
		s = s[offset:]
	} else if idx != -1 {
		// Negated: segments may only cover the needle-free prefix.
		s = s[:idx]
	}

	segments := acquireSegments(len(s) + 1)
	for i := range s {
		segments = append(segments, offset+i)
	}

	return 0, append(segments, offset+len(s))
}
|
||||
|
||||
func (self Contains) Len() int {
|
||||
return lenNo
|
||||
}
|
||||
|
||||
func (self Contains) String() string {
|
||||
var not string
|
||||
if self.Not {
|
||||
not = "!"
|
||||
}
|
||||
return fmt.Sprintf("<contains:%s[%s]>", not, self.Needle)
|
||||
}
|
||||
99
vendor/github.com/gobwas/glob/match/every_of.go
generated
vendored
Normal file
99
vendor/github.com/gobwas/glob/match/every_of.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type EveryOf struct {
|
||||
Matchers Matchers
|
||||
}
|
||||
|
||||
func NewEveryOf(m ...Matcher) EveryOf {
|
||||
return EveryOf{Matchers(m)}
|
||||
}
|
||||
|
||||
func (self *EveryOf) Add(m Matcher) error {
|
||||
self.Matchers = append(self.Matchers, m)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Len reports the combined static length of the conjunction.
//
// NOTE(review): the condition below tests `l > 0` (the accumulator, which
// starts at 0) rather than the freshly computed `ml`, so any non-empty
// EveryOf returns -1 on the first iteration and an empty one returns 0.
// Confirm whether `ml` was intended before relying on this value.
func (self EveryOf) Len() (l int) {
	for _, m := range self.Matchers {
		if ml := m.Len(); l > 0 {
			l += ml
		} else {
			return -1
		}
	}

	return
}
|
||||
|
||||
// Index returns the offset and segment set on which every child matcher
// agrees. It walks the matchers in order, re-anchoring at each one's match
// position and intersecting the candidate segment sets (adjusted by the
// accumulated offset) until all agree or one fails.
func (self EveryOf) Index(s string) (int, []int) {
	var index int
	var offset int

	// make `in` with cap as len(s),
	// cause it is the maximum size of output segments values
	next := acquireSegments(len(s))
	current := acquireSegments(len(s))

	sub := s
	for i, m := range self.Matchers {
		idx, seg := m.Index(sub)
		if idx == -1 {
			releaseSegments(next)
			releaseSegments(current)
			return -1, nil
		}

		if i == 0 {
			// we use copy here instead of `current = seg`
			// cause seg is a slice from reusable buffer `in`
			// and it could be overwritten in next iteration
			current = append(current, seg...)
		} else {
			// clear the next
			next = next[:0]

			// Keep only segments that both the accumulated set and the
			// new matcher (shifted by delta) agree on.
			delta := index - (idx + offset)
			for _, ex := range current {
				for _, n := range seg {
					if ex+delta == n {
						next = append(next, n)
					}
				}
			}

			if len(next) == 0 {
				releaseSegments(next)
				releaseSegments(current)
				return -1, nil
			}

			current = append(current[:0], next...)
		}

		index = idx + offset
		sub = s[index:]
		offset += idx
	}

	releaseSegments(next)

	return index, current
}
|
||||
|
||||
func (self EveryOf) Match(s string) bool {
|
||||
for _, m := range self.Matchers {
|
||||
if !m.Match(s) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (self EveryOf) String() string {
|
||||
return fmt.Sprintf("<every_of:[%s]>", self.Matchers)
|
||||
}
|
||||
49
vendor/github.com/gobwas/glob/match/list.go
generated
vendored
Normal file
49
vendor/github.com/gobwas/glob/match/list.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/gobwas/glob/util/runes"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type List struct {
|
||||
List []rune
|
||||
Not bool
|
||||
}
|
||||
|
||||
func NewList(list []rune, not bool) List {
|
||||
return List{list, not}
|
||||
}
|
||||
|
||||
func (self List) Match(s string) bool {
|
||||
r, w := utf8.DecodeRuneInString(s)
|
||||
if len(s) > w {
|
||||
return false
|
||||
}
|
||||
|
||||
inList := runes.IndexRune(self.List, r) != -1
|
||||
return inList == !self.Not
|
||||
}
|
||||
|
||||
func (self List) Len() int {
|
||||
return lenOne
|
||||
}
|
||||
|
||||
func (self List) Index(s string) (int, []int) {
|
||||
for i, r := range s {
|
||||
if self.Not == (runes.IndexRune(self.List, r) == -1) {
|
||||
return i, segmentsByRuneLength[utf8.RuneLen(r)]
|
||||
}
|
||||
}
|
||||
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func (self List) String() string {
|
||||
var not string
|
||||
if self.Not {
|
||||
not = "!"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("<list:%s[%s]>", not, string(self.List))
|
||||
}
|
||||
81
vendor/github.com/gobwas/glob/match/match.go
generated
vendored
Normal file
81
vendor/github.com/gobwas/glob/match/match.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
package match
|
||||
|
||||
// todo common table of rune's length
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const lenOne = 1
|
||||
const lenZero = 0
|
||||
const lenNo = -1
|
||||
|
||||
type Matcher interface {
|
||||
Match(string) bool
|
||||
Index(string) (int, []int)
|
||||
Len() int
|
||||
String() string
|
||||
}
|
||||
|
||||
type Matchers []Matcher
|
||||
|
||||
func (m Matchers) String() string {
|
||||
var s []string
|
||||
for _, matcher := range m {
|
||||
s = append(s, fmt.Sprint(matcher))
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s", strings.Join(s, ","))
|
||||
}
|
||||
|
||||
// appendMerge merges and sorts given already SORTED and UNIQUE segments.
|
||||
func appendMerge(target, sub []int) []int {
|
||||
lt, ls := len(target), len(sub)
|
||||
out := make([]int, 0, lt+ls)
|
||||
|
||||
for x, y := 0, 0; x < lt || y < ls; {
|
||||
if x >= lt {
|
||||
out = append(out, sub[y:]...)
|
||||
break
|
||||
}
|
||||
|
||||
if y >= ls {
|
||||
out = append(out, target[x:]...)
|
||||
break
|
||||
}
|
||||
|
||||
xValue := target[x]
|
||||
yValue := sub[y]
|
||||
|
||||
switch {
|
||||
|
||||
case xValue == yValue:
|
||||
out = append(out, xValue)
|
||||
x++
|
||||
y++
|
||||
|
||||
case xValue < yValue:
|
||||
out = append(out, xValue)
|
||||
x++
|
||||
|
||||
case yValue < xValue:
|
||||
out = append(out, yValue)
|
||||
y++
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
target = append(target[:0], out...)
|
||||
|
||||
return target
|
||||
}
|
||||
|
||||
func reverseSegments(input []int) {
|
||||
l := len(input)
|
||||
m := l / 2
|
||||
|
||||
for i := 0; i < m; i++ {
|
||||
input[i], input[l-i-1] = input[l-i-1], input[i]
|
||||
}
|
||||
}
|
||||
49
vendor/github.com/gobwas/glob/match/max.go
generated
vendored
Normal file
49
vendor/github.com/gobwas/glob/match/max.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type Max struct {
|
||||
Limit int
|
||||
}
|
||||
|
||||
func NewMax(l int) Max {
|
||||
return Max{l}
|
||||
}
|
||||
|
||||
func (self Max) Match(s string) bool {
|
||||
var l int
|
||||
for range s {
|
||||
l += 1
|
||||
if l > self.Limit {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (self Max) Index(s string) (int, []int) {
|
||||
segments := acquireSegments(self.Limit + 1)
|
||||
segments = append(segments, 0)
|
||||
var count int
|
||||
for i, r := range s {
|
||||
count++
|
||||
if count > self.Limit {
|
||||
break
|
||||
}
|
||||
segments = append(segments, i+utf8.RuneLen(r))
|
||||
}
|
||||
|
||||
return 0, segments
|
||||
}
|
||||
|
||||
func (self Max) Len() int {
|
||||
return lenNo
|
||||
}
|
||||
|
||||
func (self Max) String() string {
|
||||
return fmt.Sprintf("<max:%d>", self.Limit)
|
||||
}
|
||||
57
vendor/github.com/gobwas/glob/match/min.go
generated
vendored
Normal file
57
vendor/github.com/gobwas/glob/match/min.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type Min struct {
|
||||
Limit int
|
||||
}
|
||||
|
||||
func NewMin(l int) Min {
|
||||
return Min{l}
|
||||
}
|
||||
|
||||
func (self Min) Match(s string) bool {
|
||||
var l int
|
||||
for range s {
|
||||
l += 1
|
||||
if l >= self.Limit {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (self Min) Index(s string) (int, []int) {
|
||||
var count int
|
||||
|
||||
c := len(s) - self.Limit + 1
|
||||
if c <= 0 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
segments := acquireSegments(c)
|
||||
for i, r := range s {
|
||||
count++
|
||||
if count >= self.Limit {
|
||||
segments = append(segments, i+utf8.RuneLen(r))
|
||||
}
|
||||
}
|
||||
|
||||
if len(segments) == 0 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
return 0, segments
|
||||
}
|
||||
|
||||
func (self Min) Len() int {
|
||||
return lenNo
|
||||
}
|
||||
|
||||
func (self Min) String() string {
|
||||
return fmt.Sprintf("<min:%d>", self.Limit)
|
||||
}
|
||||
27
vendor/github.com/gobwas/glob/match/nothing.go
generated
vendored
Normal file
27
vendor/github.com/gobwas/glob/match/nothing.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type Nothing struct{}
|
||||
|
||||
func NewNothing() Nothing {
|
||||
return Nothing{}
|
||||
}
|
||||
|
||||
func (self Nothing) Match(s string) bool {
|
||||
return len(s) == 0
|
||||
}
|
||||
|
||||
func (self Nothing) Index(s string) (int, []int) {
|
||||
return 0, segments0
|
||||
}
|
||||
|
||||
func (self Nothing) Len() int {
|
||||
return lenZero
|
||||
}
|
||||
|
||||
func (self Nothing) String() string {
|
||||
return fmt.Sprintf("<nothing>")
|
||||
}
|
||||
50
vendor/github.com/gobwas/glob/match/prefix.go
generated
vendored
Normal file
50
vendor/github.com/gobwas/glob/match/prefix.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type Prefix struct {
|
||||
Prefix string
|
||||
}
|
||||
|
||||
func NewPrefix(p string) Prefix {
|
||||
return Prefix{p}
|
||||
}
|
||||
|
||||
func (self Prefix) Index(s string) (int, []int) {
|
||||
idx := strings.Index(s, self.Prefix)
|
||||
if idx == -1 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
length := len(self.Prefix)
|
||||
var sub string
|
||||
if len(s) > idx+length {
|
||||
sub = s[idx+length:]
|
||||
} else {
|
||||
sub = ""
|
||||
}
|
||||
|
||||
segments := acquireSegments(len(sub) + 1)
|
||||
segments = append(segments, length)
|
||||
for i, r := range sub {
|
||||
segments = append(segments, length+i+utf8.RuneLen(r))
|
||||
}
|
||||
|
||||
return idx, segments
|
||||
}
|
||||
|
||||
func (self Prefix) Len() int {
|
||||
return lenNo
|
||||
}
|
||||
|
||||
func (self Prefix) Match(s string) bool {
|
||||
return strings.HasPrefix(s, self.Prefix)
|
||||
}
|
||||
|
||||
func (self Prefix) String() string {
|
||||
return fmt.Sprintf("<prefix:%s>", self.Prefix)
|
||||
}
|
||||
55
vendor/github.com/gobwas/glob/match/prefix_any.go
generated
vendored
Normal file
55
vendor/github.com/gobwas/glob/match/prefix_any.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
sutil "github.com/gobwas/glob/util/strings"
|
||||
)
|
||||
|
||||
type PrefixAny struct {
|
||||
Prefix string
|
||||
Separators []rune
|
||||
}
|
||||
|
||||
func NewPrefixAny(s string, sep []rune) PrefixAny {
|
||||
return PrefixAny{s, sep}
|
||||
}
|
||||
|
||||
func (self PrefixAny) Index(s string) (int, []int) {
|
||||
idx := strings.Index(s, self.Prefix)
|
||||
if idx == -1 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
n := len(self.Prefix)
|
||||
sub := s[idx+n:]
|
||||
i := sutil.IndexAnyRunes(sub, self.Separators)
|
||||
if i > -1 {
|
||||
sub = sub[:i]
|
||||
}
|
||||
|
||||
seg := acquireSegments(len(sub) + 1)
|
||||
seg = append(seg, n)
|
||||
for i, r := range sub {
|
||||
seg = append(seg, n+i+utf8.RuneLen(r))
|
||||
}
|
||||
|
||||
return idx, seg
|
||||
}
|
||||
|
||||
func (self PrefixAny) Len() int {
|
||||
return lenNo
|
||||
}
|
||||
|
||||
func (self PrefixAny) Match(s string) bool {
|
||||
if !strings.HasPrefix(s, self.Prefix) {
|
||||
return false
|
||||
}
|
||||
return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1
|
||||
}
|
||||
|
||||
func (self PrefixAny) String() string {
|
||||
return fmt.Sprintf("<prefix_any:%s![%s]>", self.Prefix, string(self.Separators))
|
||||
}
|
||||
62
vendor/github.com/gobwas/glob/match/prefix_suffix.go
generated
vendored
Normal file
62
vendor/github.com/gobwas/glob/match/prefix_suffix.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type PrefixSuffix struct {
|
||||
Prefix, Suffix string
|
||||
}
|
||||
|
||||
func NewPrefixSuffix(p, s string) PrefixSuffix {
|
||||
return PrefixSuffix{p, s}
|
||||
}
|
||||
|
||||
func (self PrefixSuffix) Index(s string) (int, []int) {
|
||||
prefixIdx := strings.Index(s, self.Prefix)
|
||||
if prefixIdx == -1 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
suffixLen := len(self.Suffix)
|
||||
if suffixLen <= 0 {
|
||||
return prefixIdx, []int{len(s) - prefixIdx}
|
||||
}
|
||||
|
||||
if (len(s) - prefixIdx) <= 0 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
segments := acquireSegments(len(s) - prefixIdx)
|
||||
for sub := s[prefixIdx:]; ; {
|
||||
suffixIdx := strings.LastIndex(sub, self.Suffix)
|
||||
if suffixIdx == -1 {
|
||||
break
|
||||
}
|
||||
|
||||
segments = append(segments, suffixIdx+suffixLen)
|
||||
sub = sub[:suffixIdx]
|
||||
}
|
||||
|
||||
if len(segments) == 0 {
|
||||
releaseSegments(segments)
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
reverseSegments(segments)
|
||||
|
||||
return prefixIdx, segments
|
||||
}
|
||||
|
||||
func (self PrefixSuffix) Len() int {
|
||||
return lenNo
|
||||
}
|
||||
|
||||
func (self PrefixSuffix) Match(s string) bool {
|
||||
return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix)
|
||||
}
|
||||
|
||||
func (self PrefixSuffix) String() string {
|
||||
return fmt.Sprintf("<prefix_suffix:[%s,%s]>", self.Prefix, self.Suffix)
|
||||
}
|
||||
48
vendor/github.com/gobwas/glob/match/range.go
generated
vendored
Normal file
48
vendor/github.com/gobwas/glob/match/range.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type Range struct {
|
||||
Lo, Hi rune
|
||||
Not bool
|
||||
}
|
||||
|
||||
func NewRange(lo, hi rune, not bool) Range {
|
||||
return Range{lo, hi, not}
|
||||
}
|
||||
|
||||
func (self Range) Len() int {
|
||||
return lenOne
|
||||
}
|
||||
|
||||
func (self Range) Match(s string) bool {
|
||||
r, w := utf8.DecodeRuneInString(s)
|
||||
if len(s) > w {
|
||||
return false
|
||||
}
|
||||
|
||||
inRange := r >= self.Lo && r <= self.Hi
|
||||
|
||||
return inRange == !self.Not
|
||||
}
|
||||
|
||||
func (self Range) Index(s string) (int, []int) {
|
||||
for i, r := range s {
|
||||
if self.Not != (r >= self.Lo && r <= self.Hi) {
|
||||
return i, segmentsByRuneLength[utf8.RuneLen(r)]
|
||||
}
|
||||
}
|
||||
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func (self Range) String() string {
|
||||
var not string
|
||||
if self.Not {
|
||||
not = "!"
|
||||
}
|
||||
return fmt.Sprintf("<range:%s[%s,%s]>", not, string(self.Lo), string(self.Hi))
|
||||
}
|
||||
77
vendor/github.com/gobwas/glob/match/row.go
generated
vendored
Normal file
77
vendor/github.com/gobwas/glob/match/row.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type Row struct {
|
||||
Matchers Matchers
|
||||
RunesLength int
|
||||
Segments []int
|
||||
}
|
||||
|
||||
func NewRow(len int, m ...Matcher) Row {
|
||||
return Row{
|
||||
Matchers: Matchers(m),
|
||||
RunesLength: len,
|
||||
Segments: []int{len},
|
||||
}
|
||||
}
|
||||
|
||||
func (self Row) matchAll(s string) bool {
|
||||
var idx int
|
||||
for _, m := range self.Matchers {
|
||||
length := m.Len()
|
||||
|
||||
var next, i int
|
||||
for next = range s[idx:] {
|
||||
i++
|
||||
if i == length {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if i < length || !m.Match(s[idx:idx+next+1]) {
|
||||
return false
|
||||
}
|
||||
|
||||
idx += next + 1
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (self Row) lenOk(s string) bool {
|
||||
var i int
|
||||
for range s {
|
||||
i++
|
||||
if i > self.RunesLength {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return self.RunesLength == i
|
||||
}
|
||||
|
||||
func (self Row) Match(s string) bool {
|
||||
return self.lenOk(s) && self.matchAll(s)
|
||||
}
|
||||
|
||||
func (self Row) Len() (l int) {
|
||||
return self.RunesLength
|
||||
}
|
||||
|
||||
func (self Row) Index(s string) (int, []int) {
|
||||
for i := range s {
|
||||
if len(s[i:]) < self.RunesLength {
|
||||
break
|
||||
}
|
||||
if self.matchAll(s[i:]) {
|
||||
return i, self.Segments
|
||||
}
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func (self Row) String() string {
|
||||
return fmt.Sprintf("<row_%d:[%s]>", self.RunesLength, self.Matchers)
|
||||
}
|
||||
91
vendor/github.com/gobwas/glob/match/segments.go
generated
vendored
Normal file
91
vendor/github.com/gobwas/glob/match/segments.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type SomePool interface {
|
||||
Get() []int
|
||||
Put([]int)
|
||||
}
|
||||
|
||||
var segmentsPools [1024]sync.Pool
|
||||
|
||||
func toPowerOfTwo(v int) int {
|
||||
v--
|
||||
v |= v >> 1
|
||||
v |= v >> 2
|
||||
v |= v >> 4
|
||||
v |= v >> 8
|
||||
v |= v >> 16
|
||||
v++
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
const (
|
||||
cacheFrom = 16
|
||||
cacheToAndHigher = 1024
|
||||
cacheFromIndex = 15
|
||||
cacheToAndHigherIndex = 1023
|
||||
)
|
||||
|
||||
var (
|
||||
segments0 = []int{0}
|
||||
segments1 = []int{1}
|
||||
segments2 = []int{2}
|
||||
segments3 = []int{3}
|
||||
segments4 = []int{4}
|
||||
)
|
||||
|
||||
var segmentsByRuneLength [5][]int = [5][]int{
|
||||
0: segments0,
|
||||
1: segments1,
|
||||
2: segments2,
|
||||
3: segments3,
|
||||
4: segments4,
|
||||
}
|
||||
|
||||
func init() {
|
||||
for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 {
|
||||
func(i int) {
|
||||
segmentsPools[i-1] = sync.Pool{New: func() interface{} {
|
||||
return make([]int, 0, i)
|
||||
}}
|
||||
}(i)
|
||||
}
|
||||
}
|
||||
|
||||
func getTableIndex(c int) int {
|
||||
p := toPowerOfTwo(c)
|
||||
switch {
|
||||
case p >= cacheToAndHigher:
|
||||
return cacheToAndHigherIndex
|
||||
case p <= cacheFrom:
|
||||
return cacheFromIndex
|
||||
default:
|
||||
return p - 1
|
||||
}
|
||||
}
|
||||
|
||||
func acquireSegments(c int) []int {
|
||||
// make []int with less capacity than cacheFrom
|
||||
// is faster than acquiring it from pool
|
||||
if c < cacheFrom {
|
||||
return make([]int, 0, c)
|
||||
}
|
||||
|
||||
return segmentsPools[getTableIndex(c)].Get().([]int)[:0]
|
||||
}
|
||||
|
||||
func releaseSegments(s []int) {
|
||||
c := cap(s)
|
||||
|
||||
// make []int with less capacity than cacheFrom
|
||||
// is faster than acquiring it from pool
|
||||
if c < cacheFrom {
|
||||
return
|
||||
}
|
||||
|
||||
segmentsPools[getTableIndex(c)].Put(s)
|
||||
}
|
||||
43
vendor/github.com/gobwas/glob/match/single.go
generated
vendored
Normal file
43
vendor/github.com/gobwas/glob/match/single.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/gobwas/glob/util/runes"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// single represents ?
|
||||
type Single struct {
|
||||
Separators []rune
|
||||
}
|
||||
|
||||
func NewSingle(s []rune) Single {
|
||||
return Single{s}
|
||||
}
|
||||
|
||||
func (self Single) Match(s string) bool {
|
||||
r, w := utf8.DecodeRuneInString(s)
|
||||
if len(s) > w {
|
||||
return false
|
||||
}
|
||||
|
||||
return runes.IndexRune(self.Separators, r) == -1
|
||||
}
|
||||
|
||||
func (self Single) Len() int {
|
||||
return lenOne
|
||||
}
|
||||
|
||||
func (self Single) Index(s string) (int, []int) {
|
||||
for i, r := range s {
|
||||
if runes.IndexRune(self.Separators, r) == -1 {
|
||||
return i, segmentsByRuneLength[utf8.RuneLen(r)]
|
||||
}
|
||||
}
|
||||
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func (self Single) String() string {
|
||||
return fmt.Sprintf("<single:![%s]>", string(self.Separators))
|
||||
}
|
||||
35
vendor/github.com/gobwas/glob/match/suffix.go
generated
vendored
Normal file
35
vendor/github.com/gobwas/glob/match/suffix.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Suffix struct {
|
||||
Suffix string
|
||||
}
|
||||
|
||||
func NewSuffix(s string) Suffix {
|
||||
return Suffix{s}
|
||||
}
|
||||
|
||||
func (self Suffix) Len() int {
|
||||
return lenNo
|
||||
}
|
||||
|
||||
func (self Suffix) Match(s string) bool {
|
||||
return strings.HasSuffix(s, self.Suffix)
|
||||
}
|
||||
|
||||
func (self Suffix) Index(s string) (int, []int) {
|
||||
idx := strings.Index(s, self.Suffix)
|
||||
if idx == -1 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
return 0, []int{idx + len(self.Suffix)}
|
||||
}
|
||||
|
||||
func (self Suffix) String() string {
|
||||
return fmt.Sprintf("<suffix:%s>", self.Suffix)
|
||||
}
|
||||
43
vendor/github.com/gobwas/glob/match/suffix_any.go
generated
vendored
Normal file
43
vendor/github.com/gobwas/glob/match/suffix_any.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
sutil "github.com/gobwas/glob/util/strings"
|
||||
)
|
||||
|
||||
type SuffixAny struct {
|
||||
Suffix string
|
||||
Separators []rune
|
||||
}
|
||||
|
||||
func NewSuffixAny(s string, sep []rune) SuffixAny {
|
||||
return SuffixAny{s, sep}
|
||||
}
|
||||
|
||||
func (self SuffixAny) Index(s string) (int, []int) {
|
||||
idx := strings.Index(s, self.Suffix)
|
||||
if idx == -1 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1
|
||||
|
||||
return i, []int{idx + len(self.Suffix) - i}
|
||||
}
|
||||
|
||||
func (self SuffixAny) Len() int {
|
||||
return lenNo
|
||||
}
|
||||
|
||||
func (self SuffixAny) Match(s string) bool {
|
||||
if !strings.HasSuffix(s, self.Suffix) {
|
||||
return false
|
||||
}
|
||||
return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1
|
||||
}
|
||||
|
||||
func (self SuffixAny) String() string {
|
||||
return fmt.Sprintf("<suffix_any:![%s]%s>", string(self.Separators), self.Suffix)
|
||||
}
|
||||
33
vendor/github.com/gobwas/glob/match/super.go
generated
vendored
Normal file
33
vendor/github.com/gobwas/glob/match/super.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type Super struct{}
|
||||
|
||||
func NewSuper() Super {
|
||||
return Super{}
|
||||
}
|
||||
|
||||
func (self Super) Match(s string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (self Super) Len() int {
|
||||
return lenNo
|
||||
}
|
||||
|
||||
func (self Super) Index(s string) (int, []int) {
|
||||
segments := acquireSegments(len(s) + 1)
|
||||
for i := range s {
|
||||
segments = append(segments, i)
|
||||
}
|
||||
segments = append(segments, len(s))
|
||||
|
||||
return 0, segments
|
||||
}
|
||||
|
||||
func (self Super) String() string {
|
||||
return fmt.Sprintf("<super>")
|
||||
}
|
||||
45
vendor/github.com/gobwas/glob/match/text.go
generated
vendored
Normal file
45
vendor/github.com/gobwas/glob/match/text.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
package match
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// raw represents raw string to match
|
||||
type Text struct {
|
||||
Str string
|
||||
RunesLength int
|
||||
BytesLength int
|
||||
Segments []int
|
||||
}
|
||||
|
||||
func NewText(s string) Text {
|
||||
return Text{
|
||||
Str: s,
|
||||
RunesLength: utf8.RuneCountInString(s),
|
||||
BytesLength: len(s),
|
||||
Segments: []int{len(s)},
|
||||
}
|
||||
}
|
||||
|
||||
func (self Text) Match(s string) bool {
|
||||
return self.Str == s
|
||||
}
|
||||
|
||||
func (self Text) Len() int {
|
||||
return self.RunesLength
|
||||
}
|
||||
|
||||
func (self Text) Index(s string) (int, []int) {
|
||||
index := strings.Index(s, self.Str)
|
||||
if index == -1 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
return index, self.Segments
|
||||
}
|
||||
|
||||
func (self Text) String() string {
|
||||
return fmt.Sprintf("<text:`%v`>", self.Str)
|
||||
}
|
||||
148
vendor/github.com/gobwas/glob/readme.md
generated
vendored
Normal file
148
vendor/github.com/gobwas/glob/readme.md
generated
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
# glob.[go](https://golang.org)
|
||||
|
||||
[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url]
|
||||
|
||||
> Go Globbing Library.
|
||||
|
||||
## Install
|
||||
|
||||
```shell
|
||||
go get github.com/gobwas/glob
|
||||
```
|
||||
|
||||
## Example
|
||||
|
||||
```go
|
||||
|
||||
package main
|
||||
|
||||
import "github.com/gobwas/glob"
|
||||
|
||||
func main() {
|
||||
var g glob.Glob
|
||||
|
||||
// create simple glob
|
||||
g = glob.MustCompile("*.github.com")
|
||||
g.Match("api.github.com") // true
|
||||
|
||||
// quote meta characters and then create simple glob
|
||||
g = glob.MustCompile(glob.QuoteMeta("*.github.com"))
|
||||
g.Match("*.github.com") // true
|
||||
|
||||
// create new glob with set of delimiters as ["."]
|
||||
g = glob.MustCompile("api.*.com", '.')
|
||||
g.Match("api.github.com") // true
|
||||
g.Match("api.gi.hub.com") // false
|
||||
|
||||
// create new glob with set of delimiters as ["."]
|
||||
// but now with super wildcard
|
||||
g = glob.MustCompile("api.**.com", '.')
|
||||
g.Match("api.github.com") // true
|
||||
g.Match("api.gi.hub.com") // true
|
||||
|
||||
// create glob with single symbol wildcard
|
||||
g = glob.MustCompile("?at")
|
||||
g.Match("cat") // true
|
||||
g.Match("fat") // true
|
||||
g.Match("at") // false
|
||||
|
||||
// create glob with single symbol wildcard and delimiters ['f']
|
||||
g = glob.MustCompile("?at", 'f')
|
||||
g.Match("cat") // true
|
||||
g.Match("fat") // false
|
||||
g.Match("at") // false
|
||||
|
||||
// create glob with character-list matchers
|
||||
g = glob.MustCompile("[abc]at")
|
||||
g.Match("cat") // true
|
||||
g.Match("bat") // true
|
||||
g.Match("fat") // false
|
||||
g.Match("at") // false
|
||||
|
||||
// create glob with character-list matchers
|
||||
g = glob.MustCompile("[!abc]at")
|
||||
g.Match("cat") // false
|
||||
g.Match("bat") // false
|
||||
g.Match("fat") // true
|
||||
g.Match("at") // false
|
||||
|
||||
// create glob with character-range matchers
|
||||
g = glob.MustCompile("[a-c]at")
|
||||
g.Match("cat") // true
|
||||
g.Match("bat") // true
|
||||
g.Match("fat") // false
|
||||
g.Match("at") // false
|
||||
|
||||
// create glob with character-range matchers
|
||||
g = glob.MustCompile("[!a-c]at")
|
||||
g.Match("cat") // false
|
||||
g.Match("bat") // false
|
||||
g.Match("fat") // true
|
||||
g.Match("at") // false
|
||||
|
||||
// create glob with pattern-alternatives list
|
||||
g = glob.MustCompile("{cat,bat,[fr]at}")
|
||||
g.Match("cat") // true
|
||||
g.Match("bat") // true
|
||||
g.Match("fat") // true
|
||||
g.Match("rat") // true
|
||||
g.Match("at") // false
|
||||
g.Match("zat") // false
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Performance
|
||||
|
||||
This library is created for compile-once patterns. This means, that compilation could take time, but
|
||||
strings matching is done faster, than in case when always parsing template.
|
||||
|
||||
If you will not use compiled `glob.Glob` object, and do `g := glob.MustCompile(pattern); g.Match(...)` every time, then your code will be much more slower.
|
||||
|
||||
Run `go test -bench=.` from source root to see the benchmarks:
|
||||
|
||||
Pattern | Fixture | Match | Speed (ns/op)
|
||||
--------|---------|-------|--------------
|
||||
`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432
|
||||
`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199
|
||||
`https://*.google.*` | `https://account.google.com` | `true` | 96
|
||||
`https://*.google.*` | `https://google.com` | `false` | 66
|
||||
`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163
|
||||
`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197
|
||||
`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22
|
||||
`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24
|
||||
`abc*` | `abcdef` | `true` | 8.15
|
||||
`abc*` | `af` | `false` | 5.68
|
||||
`*def` | `abcdef` | `true` | 8.84
|
||||
`*def` | `af` | `false` | 5.74
|
||||
`ab*ef` | `abcdef` | `true` | 15.2
|
||||
`ab*ef` | `af` | `false` | 10.4
|
||||
|
||||
The same things with `regexp` package:
|
||||
|
||||
Pattern | Fixture | Match | Speed (ns/op)
|
||||
--------|---------|-------|--------------
|
||||
`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553
|
||||
`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383
|
||||
`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205
|
||||
`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767
|
||||
`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435
|
||||
`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674
|
||||
`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039
|
||||
`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272
|
||||
`^abc.*$` | `abcdef` | `true` | 237
|
||||
`^abc.*$` | `af` | `false` | 100
|
||||
`^.*def$` | `abcdef` | `true` | 464
|
||||
`^.*def$` | `af` | `false` | 265
|
||||
`^ab.*ef$` | `abcdef` | `true` | 375
|
||||
`^ab.*ef$` | `af` | `false` | 145
|
||||
|
||||
[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg
|
||||
[godoc-url]: https://godoc.org/github.com/gobwas/glob
|
||||
[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master
|
||||
[travis-url]: https://travis-ci.org/gobwas/glob
|
||||
|
||||
## Syntax
|
||||
|
||||
Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm),
|
||||
except that `**` is aka super-asterisk, that do not sensitive for separators.
|
||||
122
vendor/github.com/gobwas/glob/syntax/ast/ast.go
generated
vendored
Normal file
122
vendor/github.com/gobwas/glob/syntax/ast/ast.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
package ast
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type Node struct {
|
||||
Parent *Node
|
||||
Children []*Node
|
||||
Value interface{}
|
||||
Kind Kind
|
||||
}
|
||||
|
||||
func NewNode(k Kind, v interface{}, ch ...*Node) *Node {
|
||||
n := &Node{
|
||||
Kind: k,
|
||||
Value: v,
|
||||
}
|
||||
for _, c := range ch {
|
||||
Insert(n, c)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (a *Node) Equal(b *Node) bool {
|
||||
if a.Kind != b.Kind {
|
||||
return false
|
||||
}
|
||||
if a.Value != b.Value {
|
||||
return false
|
||||
}
|
||||
if len(a.Children) != len(b.Children) {
|
||||
return false
|
||||
}
|
||||
for i, c := range a.Children {
|
||||
if !c.Equal(b.Children[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (a *Node) String() string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(a.Kind.String())
|
||||
if a.Value != nil {
|
||||
buf.WriteString(" =")
|
||||
buf.WriteString(fmt.Sprintf("%v", a.Value))
|
||||
}
|
||||
if len(a.Children) > 0 {
|
||||
buf.WriteString(" [")
|
||||
for i, c := range a.Children {
|
||||
if i > 0 {
|
||||
buf.WriteString(", ")
|
||||
}
|
||||
buf.WriteString(c.String())
|
||||
}
|
||||
buf.WriteString("]")
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func Insert(parent *Node, children ...*Node) {
|
||||
parent.Children = append(parent.Children, children...)
|
||||
for _, ch := range children {
|
||||
ch.Parent = parent
|
||||
}
|
||||
}
|
||||
|
||||
type List struct {
|
||||
Not bool
|
||||
Chars string
|
||||
}
|
||||
|
||||
type Range struct {
|
||||
Not bool
|
||||
Lo, Hi rune
|
||||
}
|
||||
|
||||
type Text struct {
|
||||
Text string
|
||||
}
|
||||
|
||||
type Kind int
|
||||
|
||||
const (
|
||||
KindNothing Kind = iota
|
||||
KindPattern
|
||||
KindList
|
||||
KindRange
|
||||
KindText
|
||||
KindAny
|
||||
KindSuper
|
||||
KindSingle
|
||||
KindAnyOf
|
||||
)
|
||||
|
||||
func (k Kind) String() string {
|
||||
switch k {
|
||||
case KindNothing:
|
||||
return "Nothing"
|
||||
case KindPattern:
|
||||
return "Pattern"
|
||||
case KindList:
|
||||
return "List"
|
||||
case KindRange:
|
||||
return "Range"
|
||||
case KindText:
|
||||
return "Text"
|
||||
case KindAny:
|
||||
return "Any"
|
||||
case KindSuper:
|
||||
return "Super"
|
||||
case KindSingle:
|
||||
return "Single"
|
||||
case KindAnyOf:
|
||||
return "AnyOf"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
157
vendor/github.com/gobwas/glob/syntax/ast/parser.go
generated
vendored
Normal file
157
vendor/github.com/gobwas/glob/syntax/ast/parser.go
generated
vendored
Normal file
@@ -0,0 +1,157 @@
|
||||
package ast
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/gobwas/glob/syntax/lexer"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type Lexer interface {
|
||||
Next() lexer.Token
|
||||
}
|
||||
|
||||
type parseFn func(*Node, Lexer) (parseFn, *Node, error)
|
||||
|
||||
func Parse(lexer Lexer) (*Node, error) {
|
||||
var parser parseFn
|
||||
|
||||
root := NewNode(KindPattern, nil)
|
||||
|
||||
var (
|
||||
tree *Node
|
||||
err error
|
||||
)
|
||||
for parser, tree = parserMain, root; parser != nil; {
|
||||
parser, tree, err = parser(tree, lexer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return root, nil
|
||||
}
|
||||
|
||||
func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) {
|
||||
for {
|
||||
token := lex.Next()
|
||||
switch token.Type {
|
||||
case lexer.EOF:
|
||||
return nil, tree, nil
|
||||
|
||||
case lexer.Error:
|
||||
return nil, tree, errors.New(token.Raw)
|
||||
|
||||
case lexer.Text:
|
||||
Insert(tree, NewNode(KindText, Text{token.Raw}))
|
||||
return parserMain, tree, nil
|
||||
|
||||
case lexer.Any:
|
||||
Insert(tree, NewNode(KindAny, nil))
|
||||
return parserMain, tree, nil
|
||||
|
||||
case lexer.Super:
|
||||
Insert(tree, NewNode(KindSuper, nil))
|
||||
return parserMain, tree, nil
|
||||
|
||||
case lexer.Single:
|
||||
Insert(tree, NewNode(KindSingle, nil))
|
||||
return parserMain, tree, nil
|
||||
|
||||
case lexer.RangeOpen:
|
||||
return parserRange, tree, nil
|
||||
|
||||
case lexer.TermsOpen:
|
||||
a := NewNode(KindAnyOf, nil)
|
||||
Insert(tree, a)
|
||||
|
||||
p := NewNode(KindPattern, nil)
|
||||
Insert(a, p)
|
||||
|
||||
return parserMain, p, nil
|
||||
|
||||
case lexer.Separator:
|
||||
p := NewNode(KindPattern, nil)
|
||||
Insert(tree.Parent, p)
|
||||
|
||||
return parserMain, p, nil
|
||||
|
||||
case lexer.TermsClose:
|
||||
return parserMain, tree.Parent.Parent, nil
|
||||
|
||||
default:
|
||||
return nil, tree, fmt.Errorf("unexpected token: %s", token)
|
||||
}
|
||||
}
|
||||
return nil, tree, fmt.Errorf("unknown error")
|
||||
}
|
||||
|
||||
func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) {
|
||||
var (
|
||||
not bool
|
||||
lo rune
|
||||
hi rune
|
||||
chars string
|
||||
)
|
||||
for {
|
||||
token := lex.Next()
|
||||
switch token.Type {
|
||||
case lexer.EOF:
|
||||
return nil, tree, errors.New("unexpected end")
|
||||
|
||||
case lexer.Error:
|
||||
return nil, tree, errors.New(token.Raw)
|
||||
|
||||
case lexer.Not:
|
||||
not = true
|
||||
|
||||
case lexer.RangeLo:
|
||||
r, w := utf8.DecodeRuneInString(token.Raw)
|
||||
if len(token.Raw) > w {
|
||||
return nil, tree, fmt.Errorf("unexpected length of lo character")
|
||||
}
|
||||
lo = r
|
||||
|
||||
case lexer.RangeBetween:
|
||||
//
|
||||
|
||||
case lexer.RangeHi:
|
||||
r, w := utf8.DecodeRuneInString(token.Raw)
|
||||
if len(token.Raw) > w {
|
||||
return nil, tree, fmt.Errorf("unexpected length of lo character")
|
||||
}
|
||||
|
||||
hi = r
|
||||
|
||||
if hi < lo {
|
||||
return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo))
|
||||
}
|
||||
|
||||
case lexer.Text:
|
||||
chars = token.Raw
|
||||
|
||||
case lexer.RangeClose:
|
||||
isRange := lo != 0 && hi != 0
|
||||
isChars := chars != ""
|
||||
|
||||
if isChars == isRange {
|
||||
return nil, tree, fmt.Errorf("could not parse range")
|
||||
}
|
||||
|
||||
if isRange {
|
||||
Insert(tree, NewNode(KindRange, Range{
|
||||
Lo: lo,
|
||||
Hi: hi,
|
||||
Not: not,
|
||||
}))
|
||||
} else {
|
||||
Insert(tree, NewNode(KindList, List{
|
||||
Chars: chars,
|
||||
Not: not,
|
||||
}))
|
||||
}
|
||||
|
||||
return parserMain, tree, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
273
vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
generated
vendored
Normal file
273
vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
generated
vendored
Normal file
@@ -0,0 +1,273 @@
|
||||
package lexer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/gobwas/glob/util/runes"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
char_any = '*'
|
||||
char_comma = ','
|
||||
char_single = '?'
|
||||
char_escape = '\\'
|
||||
char_range_open = '['
|
||||
char_range_close = ']'
|
||||
char_terms_open = '{'
|
||||
char_terms_close = '}'
|
||||
char_range_not = '!'
|
||||
char_range_between = '-'
|
||||
)
|
||||
|
||||
var specials = []byte{
|
||||
char_any,
|
||||
char_single,
|
||||
char_escape,
|
||||
char_range_open,
|
||||
char_range_close,
|
||||
char_terms_open,
|
||||
char_terms_close,
|
||||
}
|
||||
|
||||
func Special(c byte) bool {
|
||||
return bytes.IndexByte(specials, c) != -1
|
||||
}
|
||||
|
||||
type tokens []Token
|
||||
|
||||
func (i *tokens) shift() (ret Token) {
|
||||
ret = (*i)[0]
|
||||
copy(*i, (*i)[1:])
|
||||
*i = (*i)[:len(*i)-1]
|
||||
return
|
||||
}
|
||||
|
||||
func (i *tokens) push(v Token) {
|
||||
*i = append(*i, v)
|
||||
}
|
||||
|
||||
func (i *tokens) empty() bool {
|
||||
return len(*i) == 0
|
||||
}
|
||||
|
||||
var eof rune = 0
|
||||
|
||||
type lexer struct {
|
||||
data string
|
||||
pos int
|
||||
err error
|
||||
|
||||
tokens tokens
|
||||
termsLevel int
|
||||
|
||||
lastRune rune
|
||||
lastRuneSize int
|
||||
hasRune bool
|
||||
}
|
||||
|
||||
func NewLexer(source string) *lexer {
|
||||
l := &lexer{
|
||||
data: source,
|
||||
tokens: tokens(make([]Token, 0, 4)),
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *lexer) Next() Token {
|
||||
if l.err != nil {
|
||||
return Token{Error, l.err.Error()}
|
||||
}
|
||||
if !l.tokens.empty() {
|
||||
return l.tokens.shift()
|
||||
}
|
||||
|
||||
l.fetchItem()
|
||||
return l.Next()
|
||||
}
|
||||
|
||||
func (l *lexer) peek() (r rune, w int) {
|
||||
if l.pos == len(l.data) {
|
||||
return eof, 0
|
||||
}
|
||||
|
||||
r, w = utf8.DecodeRuneInString(l.data[l.pos:])
|
||||
if r == utf8.RuneError {
|
||||
l.errorf("could not read rune")
|
||||
r = eof
|
||||
w = 0
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (l *lexer) read() rune {
|
||||
if l.hasRune {
|
||||
l.hasRune = false
|
||||
l.seek(l.lastRuneSize)
|
||||
return l.lastRune
|
||||
}
|
||||
|
||||
r, s := l.peek()
|
||||
l.seek(s)
|
||||
|
||||
l.lastRune = r
|
||||
l.lastRuneSize = s
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (l *lexer) seek(w int) {
|
||||
l.pos += w
|
||||
}
|
||||
|
||||
func (l *lexer) unread() {
|
||||
if l.hasRune {
|
||||
l.errorf("could not unread rune")
|
||||
return
|
||||
}
|
||||
l.seek(-l.lastRuneSize)
|
||||
l.hasRune = true
|
||||
}
|
||||
|
||||
func (l *lexer) errorf(f string, v ...interface{}) {
|
||||
l.err = fmt.Errorf(f, v...)
|
||||
}
|
||||
|
||||
func (l *lexer) inTerms() bool {
|
||||
return l.termsLevel > 0
|
||||
}
|
||||
|
||||
func (l *lexer) termsEnter() {
|
||||
l.termsLevel++
|
||||
}
|
||||
|
||||
func (l *lexer) termsLeave() {
|
||||
l.termsLevel--
|
||||
}
|
||||
|
||||
var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open}
|
||||
var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma)
|
||||
|
||||
func (l *lexer) fetchItem() {
|
||||
r := l.read()
|
||||
switch {
|
||||
case r == eof:
|
||||
l.tokens.push(Token{EOF, ""})
|
||||
|
||||
case r == char_terms_open:
|
||||
l.termsEnter()
|
||||
l.tokens.push(Token{TermsOpen, string(r)})
|
||||
|
||||
case r == char_comma && l.inTerms():
|
||||
l.tokens.push(Token{Separator, string(r)})
|
||||
|
||||
case r == char_terms_close && l.inTerms():
|
||||
l.tokens.push(Token{TermsClose, string(r)})
|
||||
l.termsLeave()
|
||||
|
||||
case r == char_range_open:
|
||||
l.tokens.push(Token{RangeOpen, string(r)})
|
||||
l.fetchRange()
|
||||
|
||||
case r == char_single:
|
||||
l.tokens.push(Token{Single, string(r)})
|
||||
|
||||
case r == char_any:
|
||||
if l.read() == char_any {
|
||||
l.tokens.push(Token{Super, string(r) + string(r)})
|
||||
} else {
|
||||
l.unread()
|
||||
l.tokens.push(Token{Any, string(r)})
|
||||
}
|
||||
|
||||
default:
|
||||
l.unread()
|
||||
|
||||
var breakers []rune
|
||||
if l.inTerms() {
|
||||
breakers = inTermsBreakers
|
||||
} else {
|
||||
breakers = inTextBreakers
|
||||
}
|
||||
l.fetchText(breakers)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lexer) fetchRange() {
|
||||
var wantHi bool
|
||||
var wantClose bool
|
||||
var seenNot bool
|
||||
for {
|
||||
r := l.read()
|
||||
if r == eof {
|
||||
l.errorf("unexpected end of input")
|
||||
return
|
||||
}
|
||||
|
||||
if wantClose {
|
||||
if r != char_range_close {
|
||||
l.errorf("expected close range character")
|
||||
} else {
|
||||
l.tokens.push(Token{RangeClose, string(r)})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if wantHi {
|
||||
l.tokens.push(Token{RangeHi, string(r)})
|
||||
wantClose = true
|
||||
continue
|
||||
}
|
||||
|
||||
if !seenNot && r == char_range_not {
|
||||
l.tokens.push(Token{Not, string(r)})
|
||||
seenNot = true
|
||||
continue
|
||||
}
|
||||
|
||||
if n, w := l.peek(); n == char_range_between {
|
||||
l.seek(w)
|
||||
l.tokens.push(Token{RangeLo, string(r)})
|
||||
l.tokens.push(Token{RangeBetween, string(n)})
|
||||
wantHi = true
|
||||
continue
|
||||
}
|
||||
|
||||
l.unread() // unread first peek and fetch as text
|
||||
l.fetchText([]rune{char_range_close})
|
||||
wantClose = true
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lexer) fetchText(breakers []rune) {
|
||||
var data []rune
|
||||
var escaped bool
|
||||
|
||||
reading:
|
||||
for {
|
||||
r := l.read()
|
||||
if r == eof {
|
||||
break
|
||||
}
|
||||
|
||||
if !escaped {
|
||||
if r == char_escape {
|
||||
escaped = true
|
||||
continue
|
||||
}
|
||||
|
||||
if runes.IndexRune(breakers, r) != -1 {
|
||||
l.unread()
|
||||
break reading
|
||||
}
|
||||
}
|
||||
|
||||
escaped = false
|
||||
data = append(data, r)
|
||||
}
|
||||
|
||||
if len(data) > 0 {
|
||||
l.tokens.push(Token{Text, string(data)})
|
||||
}
|
||||
}
|
||||
88
vendor/github.com/gobwas/glob/syntax/lexer/token.go
generated
vendored
Normal file
88
vendor/github.com/gobwas/glob/syntax/lexer/token.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
package lexer
|
||||
|
||||
import "fmt"
|
||||
|
||||
type TokenType int
|
||||
|
||||
const (
|
||||
EOF TokenType = iota
|
||||
Error
|
||||
Text
|
||||
Char
|
||||
Any
|
||||
Super
|
||||
Single
|
||||
Not
|
||||
Separator
|
||||
RangeOpen
|
||||
RangeClose
|
||||
RangeLo
|
||||
RangeHi
|
||||
RangeBetween
|
||||
TermsOpen
|
||||
TermsClose
|
||||
)
|
||||
|
||||
func (tt TokenType) String() string {
|
||||
switch tt {
|
||||
case EOF:
|
||||
return "eof"
|
||||
|
||||
case Error:
|
||||
return "error"
|
||||
|
||||
case Text:
|
||||
return "text"
|
||||
|
||||
case Char:
|
||||
return "char"
|
||||
|
||||
case Any:
|
||||
return "any"
|
||||
|
||||
case Super:
|
||||
return "super"
|
||||
|
||||
case Single:
|
||||
return "single"
|
||||
|
||||
case Not:
|
||||
return "not"
|
||||
|
||||
case Separator:
|
||||
return "separator"
|
||||
|
||||
case RangeOpen:
|
||||
return "range_open"
|
||||
|
||||
case RangeClose:
|
||||
return "range_close"
|
||||
|
||||
case RangeLo:
|
||||
return "range_lo"
|
||||
|
||||
case RangeHi:
|
||||
return "range_hi"
|
||||
|
||||
case RangeBetween:
|
||||
return "range_between"
|
||||
|
||||
case TermsOpen:
|
||||
return "terms_open"
|
||||
|
||||
case TermsClose:
|
||||
return "terms_close"
|
||||
|
||||
default:
|
||||
return "undef"
|
||||
}
|
||||
}
|
||||
|
||||
type Token struct {
|
||||
Type TokenType
|
||||
Raw string
|
||||
}
|
||||
|
||||
func (t Token) String() string {
|
||||
return fmt.Sprintf("%v<%q>", t.Type, t.Raw)
|
||||
}
|
||||
14
vendor/github.com/gobwas/glob/syntax/syntax.go
generated
vendored
Normal file
14
vendor/github.com/gobwas/glob/syntax/syntax.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
package syntax
|
||||
|
||||
import (
|
||||
"github.com/gobwas/glob/syntax/ast"
|
||||
"github.com/gobwas/glob/syntax/lexer"
|
||||
)
|
||||
|
||||
func Parse(s string) (*ast.Node, error) {
|
||||
return ast.Parse(lexer.NewLexer(s))
|
||||
}
|
||||
|
||||
func Special(b byte) bool {
|
||||
return lexer.Special(b)
|
||||
}
|
||||
154
vendor/github.com/gobwas/glob/util/runes/runes.go
generated
vendored
Normal file
154
vendor/github.com/gobwas/glob/util/runes/runes.go
generated
vendored
Normal file
@@ -0,0 +1,154 @@
|
||||
package runes
|
||||
|
||||
func Index(s, needle []rune) int {
|
||||
ls, ln := len(s), len(needle)
|
||||
|
||||
switch {
|
||||
case ln == 0:
|
||||
return 0
|
||||
case ln == 1:
|
||||
return IndexRune(s, needle[0])
|
||||
case ln == ls:
|
||||
if Equal(s, needle) {
|
||||
return 0
|
||||
}
|
||||
return -1
|
||||
case ln > ls:
|
||||
return -1
|
||||
}
|
||||
|
||||
head:
|
||||
for i := 0; i < ls && ls-i >= ln; i++ {
|
||||
for y := 0; y < ln; y++ {
|
||||
if s[i+y] != needle[y] {
|
||||
continue head
|
||||
}
|
||||
}
|
||||
|
||||
return i
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
|
||||
func LastIndex(s, needle []rune) int {
|
||||
ls, ln := len(s), len(needle)
|
||||
|
||||
switch {
|
||||
case ln == 0:
|
||||
if ls == 0 {
|
||||
return 0
|
||||
}
|
||||
return ls
|
||||
case ln == 1:
|
||||
return IndexLastRune(s, needle[0])
|
||||
case ln == ls:
|
||||
if Equal(s, needle) {
|
||||
return 0
|
||||
}
|
||||
return -1
|
||||
case ln > ls:
|
||||
return -1
|
||||
}
|
||||
|
||||
head:
|
||||
for i := ls - 1; i >= 0 && i >= ln; i-- {
|
||||
for y := ln - 1; y >= 0; y-- {
|
||||
if s[i-(ln-y-1)] != needle[y] {
|
||||
continue head
|
||||
}
|
||||
}
|
||||
|
||||
return i - ln + 1
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
|
||||
// IndexAny returns the index of the first instance of any Unicode code point
|
||||
// from chars in s, or -1 if no Unicode code point from chars is present in s.
|
||||
func IndexAny(s, chars []rune) int {
|
||||
if len(chars) > 0 {
|
||||
for i, c := range s {
|
||||
for _, m := range chars {
|
||||
if c == m {
|
||||
return i
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func Contains(s, needle []rune) bool {
|
||||
return Index(s, needle) >= 0
|
||||
}
|
||||
|
||||
func Max(s []rune) (max rune) {
|
||||
for _, r := range s {
|
||||
if r > max {
|
||||
max = r
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func Min(s []rune) rune {
|
||||
min := rune(-1)
|
||||
for _, r := range s {
|
||||
if min == -1 {
|
||||
min = r
|
||||
continue
|
||||
}
|
||||
|
||||
if r < min {
|
||||
min = r
|
||||
}
|
||||
}
|
||||
|
||||
return min
|
||||
}
|
||||
|
||||
func IndexRune(s []rune, r rune) int {
|
||||
for i, c := range s {
|
||||
if c == r {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func IndexLastRune(s []rune, r rune) int {
|
||||
for i := len(s) - 1; i >= 0; i-- {
|
||||
if s[i] == r {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
|
||||
func Equal(a, b []rune) bool {
|
||||
if len(a) == len(b) {
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i] != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// HasPrefix tests whether the string s begins with prefix.
|
||||
func HasPrefix(s, prefix []rune) bool {
|
||||
return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
|
||||
}
|
||||
|
||||
// HasSuffix tests whether the string s ends with suffix.
|
||||
func HasSuffix(s, suffix []rune) bool {
|
||||
return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
|
||||
}
|
||||
39
vendor/github.com/gobwas/glob/util/strings/strings.go
generated
vendored
Normal file
39
vendor/github.com/gobwas/glob/util/strings/strings.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package strings
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
func IndexAnyRunes(s string, rs []rune) int {
|
||||
for _, r := range rs {
|
||||
if i := strings.IndexRune(s, r); i != -1 {
|
||||
return i
|
||||
}
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
|
||||
func LastIndexAnyRunes(s string, rs []rune) int {
|
||||
for _, r := range rs {
|
||||
i := -1
|
||||
if 0 <= r && r < utf8.RuneSelf {
|
||||
i = strings.LastIndexByte(s, byte(r))
|
||||
} else {
|
||||
sub := s
|
||||
for len(sub) > 0 {
|
||||
j := strings.IndexRune(s, r)
|
||||
if j == -1 {
|
||||
break
|
||||
}
|
||||
i = j
|
||||
sub = sub[i+1:]
|
||||
}
|
||||
}
|
||||
if i != -1 {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
476
vendor/github.com/klauspost/cpuid/private-gen.go
generated
vendored
Normal file
476
vendor/github.com/klauspost/cpuid/private-gen.go
generated
vendored
Normal file
@@ -0,0 +1,476 @@
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/printer"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var inFiles = []string{"cpuid.go", "cpuid_test.go"}
|
||||
var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
|
||||
var fileSet = token.NewFileSet()
|
||||
var reWrites = []rewrite{
|
||||
initRewrite("CPUInfo -> cpuInfo"),
|
||||
initRewrite("Vendor -> vendor"),
|
||||
initRewrite("Flags -> flags"),
|
||||
initRewrite("Detect -> detect"),
|
||||
initRewrite("CPU -> cpu"),
|
||||
}
|
||||
var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
|
||||
// cpuid_test.go
|
||||
"t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
|
||||
}
|
||||
|
||||
var excludePrefixes = []string{"test", "benchmark"}
|
||||
|
||||
func main() {
|
||||
Package := "private"
|
||||
parserMode := parser.ParseComments
|
||||
exported := make(map[string]rewrite)
|
||||
for _, file := range inFiles {
|
||||
in, err := os.Open(file)
|
||||
if err != nil {
|
||||
log.Fatalf("opening input", err)
|
||||
}
|
||||
|
||||
src, err := ioutil.ReadAll(in)
|
||||
if err != nil {
|
||||
log.Fatalf("reading input", err)
|
||||
}
|
||||
|
||||
astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
|
||||
if err != nil {
|
||||
log.Fatalf("parsing input", err)
|
||||
}
|
||||
|
||||
for _, rw := range reWrites {
|
||||
astfile = rw(astfile)
|
||||
}
|
||||
|
||||
// Inspect the AST and print all identifiers and literals.
|
||||
var startDecl token.Pos
|
||||
var endDecl token.Pos
|
||||
ast.Inspect(astfile, func(n ast.Node) bool {
|
||||
var s string
|
||||
switch x := n.(type) {
|
||||
case *ast.Ident:
|
||||
if x.IsExported() {
|
||||
t := strings.ToLower(x.Name)
|
||||
for _, pre := range excludePrefixes {
|
||||
if strings.HasPrefix(t, pre) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if excludeNames[t] != true {
|
||||
//if x.Pos() > startDecl && x.Pos() < endDecl {
|
||||
exported[x.Name] = initRewrite(x.Name + " -> " + t)
|
||||
}
|
||||
}
|
||||
|
||||
case *ast.GenDecl:
|
||||
if x.Tok == token.CONST && x.Lparen > 0 {
|
||||
startDecl = x.Lparen
|
||||
endDecl = x.Rparen
|
||||
// fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
|
||||
}
|
||||
}
|
||||
if s != "" {
|
||||
fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
for _, rw := range exported {
|
||||
astfile = rw(astfile)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
printer.Fprint(&buf, fileSet, astfile)
|
||||
|
||||
// Remove package documentation and insert information
|
||||
s := buf.String()
|
||||
ind := strings.Index(buf.String(), "\npackage cpuid")
|
||||
s = s[ind:]
|
||||
s = "// Generated, DO NOT EDIT,\n" +
|
||||
"// but copy it to your own project and rename the package.\n" +
|
||||
"// See more at http://github.com/klauspost/cpuid\n" +
|
||||
s
|
||||
|
||||
outputName := Package + string(os.PathSeparator) + file
|
||||
|
||||
err = ioutil.WriteFile(outputName, []byte(s), 0644)
|
||||
if err != nil {
|
||||
log.Fatalf("writing output: %s", err)
|
||||
}
|
||||
log.Println("Generated", outputName)
|
||||
}
|
||||
|
||||
for _, file := range copyFiles {
|
||||
dst := ""
|
||||
if strings.HasPrefix(file, "cpuid") {
|
||||
dst = Package + string(os.PathSeparator) + file
|
||||
} else {
|
||||
dst = Package + string(os.PathSeparator) + "cpuid_" + file
|
||||
}
|
||||
err := copyFile(file, dst)
|
||||
if err != nil {
|
||||
log.Fatalf("copying file: %s", err)
|
||||
}
|
||||
log.Println("Copied", dst)
|
||||
}
|
||||
}
|
||||
|
||||
// CopyFile copies a file from src to dst. If src and dst files exist, and are
|
||||
// the same, then return success. Copy the file contents from src to dst.
|
||||
func copyFile(src, dst string) (err error) {
|
||||
sfi, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !sfi.Mode().IsRegular() {
|
||||
// cannot copy non-regular files (e.g., directories,
|
||||
// symlinks, devices, etc.)
|
||||
return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
|
||||
}
|
||||
dfi, err := os.Stat(dst)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if !(dfi.Mode().IsRegular()) {
|
||||
return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
|
||||
}
|
||||
if os.SameFile(sfi, dfi) {
|
||||
return
|
||||
}
|
||||
}
|
||||
err = copyFileContents(src, dst)
|
||||
return
|
||||
}
|
||||
|
||||
// copyFileContents copies the contents of the file named src to the file named
|
||||
// by dst. The file will be created if it does not already exist. If the
|
||||
// destination file exists, all it's contents will be replaced by the contents
|
||||
// of the source file.
|
||||
func copyFileContents(src, dst string) (err error) {
|
||||
in, err := os.Open(src)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer in.Close()
|
||||
out, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
cerr := out.Close()
|
||||
if err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}()
|
||||
if _, err = io.Copy(out, in); err != nil {
|
||||
return
|
||||
}
|
||||
err = out.Sync()
|
||||
return
|
||||
}
|
||||
|
||||
type rewrite func(*ast.File) *ast.File
|
||||
|
||||
// Mostly copied from gofmt
|
||||
func initRewrite(rewriteRule string) rewrite {
|
||||
f := strings.Split(rewriteRule, "->")
|
||||
if len(f) != 2 {
|
||||
fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
|
||||
os.Exit(2)
|
||||
}
|
||||
pattern := parseExpr(f[0], "pattern")
|
||||
replace := parseExpr(f[1], "replacement")
|
||||
return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
|
||||
}
|
||||
|
||||
// parseExpr parses s as an expression.
|
||||
// It might make sense to expand this to allow statement patterns,
|
||||
// but there are problems with preserving formatting and also
|
||||
// with what a wildcard for a statement looks like.
|
||||
func parseExpr(s, what string) ast.Expr {
|
||||
x, err := parser.ParseExpr(s)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
|
||||
os.Exit(2)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// Keep this function for debugging.
|
||||
/*
|
||||
func dump(msg string, val reflect.Value) {
|
||||
fmt.Printf("%s:\n", msg)
|
||||
ast.Print(fileSet, val.Interface())
|
||||
fmt.Println()
|
||||
}
|
||||
*/
|
||||
|
||||
// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
|
||||
func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
|
||||
cmap := ast.NewCommentMap(fileSet, p, p.Comments)
|
||||
m := make(map[string]reflect.Value)
|
||||
pat := reflect.ValueOf(pattern)
|
||||
repl := reflect.ValueOf(replace)
|
||||
|
||||
var rewriteVal func(val reflect.Value) reflect.Value
|
||||
rewriteVal = func(val reflect.Value) reflect.Value {
|
||||
// don't bother if val is invalid to start with
|
||||
if !val.IsValid() {
|
||||
return reflect.Value{}
|
||||
}
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
val = apply(rewriteVal, val)
|
||||
if match(m, pat, val) {
|
||||
val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
|
||||
r.Comments = cmap.Filter(r).Comments() // recreate comments list
|
||||
return r
|
||||
}
|
||||
|
||||
// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
|
||||
func set(x, y reflect.Value) {
|
||||
// don't bother if x cannot be set or y is invalid
|
||||
if !x.CanSet() || !y.IsValid() {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if x := recover(); x != nil {
|
||||
if s, ok := x.(string); ok &&
|
||||
(strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
|
||||
// x cannot be set to y - ignore this rewrite
|
||||
return
|
||||
}
|
||||
panic(x)
|
||||
}
|
||||
}()
|
||||
x.Set(y)
|
||||
}
|
||||
|
||||
// Values/types for special cases.
|
||||
var (
|
||||
objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
|
||||
scopePtrNil = reflect.ValueOf((*ast.Scope)(nil))
|
||||
|
||||
identType = reflect.TypeOf((*ast.Ident)(nil))
|
||||
objectPtrType = reflect.TypeOf((*ast.Object)(nil))
|
||||
positionType = reflect.TypeOf(token.NoPos)
|
||||
callExprType = reflect.TypeOf((*ast.CallExpr)(nil))
|
||||
scopePtrType = reflect.TypeOf((*ast.Scope)(nil))
|
||||
)
|
||||
|
||||
// apply replaces each AST field x in val with f(x), returning val.
|
||||
// To avoid extra conversions, f operates on the reflect.Value form.
|
||||
func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
|
||||
if !val.IsValid() {
|
||||
return reflect.Value{}
|
||||
}
|
||||
|
||||
// *ast.Objects introduce cycles and are likely incorrect after
|
||||
// rewrite; don't follow them but replace with nil instead
|
||||
if val.Type() == objectPtrType {
|
||||
return objectPtrNil
|
||||
}
|
||||
|
||||
// similarly for scopes: they are likely incorrect after a rewrite;
|
||||
// replace them with nil
|
||||
if val.Type() == scopePtrType {
|
||||
return scopePtrNil
|
||||
}
|
||||
|
||||
switch v := reflect.Indirect(val); v.Kind() {
|
||||
case reflect.Slice:
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
e := v.Index(i)
|
||||
set(e, f(e))
|
||||
}
|
||||
case reflect.Struct:
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
e := v.Field(i)
|
||||
set(e, f(e))
|
||||
}
|
||||
case reflect.Interface:
|
||||
e := v.Elem()
|
||||
set(v, f(e))
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
func isWildcard(s string) bool {
|
||||
rune, size := utf8.DecodeRuneInString(s)
|
||||
return size == len(s) && unicode.IsLower(rune)
|
||||
}
|
||||
|
||||
// match returns true if pattern matches val,
|
||||
// recording wildcard submatches in m.
|
||||
// If m == nil, match checks whether pattern == val.
|
||||
func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
|
||||
// Wildcard matches any expression. If it appears multiple
|
||||
// times in the pattern, it must match the same expression
|
||||
// each time.
|
||||
if m != nil && pattern.IsValid() && pattern.Type() == identType {
|
||||
name := pattern.Interface().(*ast.Ident).Name
|
||||
if isWildcard(name) && val.IsValid() {
|
||||
// wildcards only match valid (non-nil) expressions.
|
||||
if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
|
||||
if old, ok := m[name]; ok {
|
||||
return match(nil, old, val)
|
||||
}
|
||||
m[name] = val
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, pattern and val must match recursively.
|
||||
if !pattern.IsValid() || !val.IsValid() {
|
||||
return !pattern.IsValid() && !val.IsValid()
|
||||
}
|
||||
if pattern.Type() != val.Type() {
|
||||
return false
|
||||
}
|
||||
|
||||
// Special cases.
|
||||
switch pattern.Type() {
|
||||
case identType:
|
||||
// For identifiers, only the names need to match
|
||||
// (and none of the other *ast.Object information).
|
||||
// This is a common case, handle it all here instead
|
||||
// of recursing down any further via reflection.
|
||||
p := pattern.Interface().(*ast.Ident)
|
||||
v := val.Interface().(*ast.Ident)
|
||||
return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
|
||||
case objectPtrType, positionType:
|
||||
// object pointers and token positions always match
|
||||
return true
|
||||
case callExprType:
|
||||
// For calls, the Ellipsis fields (token.Position) must
|
||||
// match since that is how f(x) and f(x...) are different.
|
||||
// Check them here but fall through for the remaining fields.
|
||||
p := pattern.Interface().(*ast.CallExpr)
|
||||
v := val.Interface().(*ast.CallExpr)
|
||||
if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
p := reflect.Indirect(pattern)
|
||||
v := reflect.Indirect(val)
|
||||
if !p.IsValid() || !v.IsValid() {
|
||||
return !p.IsValid() && !v.IsValid()
|
||||
}
|
||||
|
||||
switch p.Kind() {
|
||||
case reflect.Slice:
|
||||
if p.Len() != v.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < p.Len(); i++ {
|
||||
if !match(m, p.Index(i), v.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
|
||||
case reflect.Struct:
|
||||
for i := 0; i < p.NumField(); i++ {
|
||||
if !match(m, p.Field(i), v.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
|
||||
case reflect.Interface:
|
||||
return match(m, p.Elem(), v.Elem())
|
||||
}
|
||||
|
||||
// Handle token integers, etc.
|
||||
return p.Interface() == v.Interface()
|
||||
}
|
||||
|
||||
// subst returns a copy of pattern with values from m substituted in place
|
||||
// of wildcards and pos used as the position of tokens from the pattern.
|
||||
// if m == nil, subst returns a copy of pattern and doesn't change the line
|
||||
// number information.
|
||||
func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
|
||||
if !pattern.IsValid() {
|
||||
return reflect.Value{}
|
||||
}
|
||||
|
||||
// Wildcard gets replaced with map value.
|
||||
if m != nil && pattern.Type() == identType {
|
||||
name := pattern.Interface().(*ast.Ident).Name
|
||||
if isWildcard(name) {
|
||||
if old, ok := m[name]; ok {
|
||||
return subst(nil, old, reflect.Value{})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pos.IsValid() && pattern.Type() == positionType {
|
||||
// use new position only if old position was valid in the first place
|
||||
if old := pattern.Interface().(token.Pos); !old.IsValid() {
|
||||
return pattern
|
||||
}
|
||||
return pos
|
||||
}
|
||||
|
||||
// Otherwise copy.
|
||||
switch p := pattern; p.Kind() {
|
||||
case reflect.Slice:
|
||||
v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
|
||||
for i := 0; i < p.Len(); i++ {
|
||||
v.Index(i).Set(subst(m, p.Index(i), pos))
|
||||
}
|
||||
return v
|
||||
|
||||
case reflect.Struct:
|
||||
v := reflect.New(p.Type()).Elem()
|
||||
for i := 0; i < p.NumField(); i++ {
|
||||
v.Field(i).Set(subst(m, p.Field(i), pos))
|
||||
}
|
||||
return v
|
||||
|
||||
case reflect.Ptr:
|
||||
v := reflect.New(p.Type()).Elem()
|
||||
if elem := p.Elem(); elem.IsValid() {
|
||||
v.Set(subst(m, elem, pos).Addr())
|
||||
}
|
||||
return v
|
||||
|
||||
case reflect.Interface:
|
||||
v := reflect.New(p.Type()).Elem()
|
||||
if elem := p.Elem(); elem.IsValid() {
|
||||
v.Set(subst(m, elem, pos))
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
return pattern
|
||||
}
|
||||
169
vendor/github.com/marten-seemann/qtls/generate_cert.go
generated
vendored
Normal file
169
vendor/github.com/marten-seemann/qtls/generate_cert.go
generated
vendored
Normal file
@@ -0,0 +1,169 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Generate a self-signed X.509 certificate for a TLS server. Outputs to
|
||||
// 'cert.pem' and 'key.pem' and will overwrite existing files.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
host = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for")
|
||||
validFrom = flag.String("start-date", "", "Creation date formatted as Jan 1 15:04:05 2011")
|
||||
validFor = flag.Duration("duration", 365*24*time.Hour, "Duration that certificate is valid for")
|
||||
isCA = flag.Bool("ca", false, "whether this cert should be its own Certificate Authority")
|
||||
rsaBits = flag.Int("rsa-bits", 2048, "Size of RSA key to generate. Ignored if --ecdsa-curve is set")
|
||||
ecdsaCurve = flag.String("ecdsa-curve", "", "ECDSA curve to use to generate a key. Valid values are P224, P256 (recommended), P384, P521")
|
||||
)
|
||||
|
||||
func publicKey(priv interface{}) interface{} {
|
||||
switch k := priv.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return &k.PublicKey
|
||||
case *ecdsa.PrivateKey:
|
||||
return &k.PublicKey
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func pemBlockForKey(priv interface{}) *pem.Block {
|
||||
switch k := priv.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}
|
||||
case *ecdsa.PrivateKey:
|
||||
b, err := x509.MarshalECPrivateKey(k)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Unable to marshal ECDSA private key: %v", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if len(*host) == 0 {
|
||||
log.Fatalf("Missing required --host parameter")
|
||||
}
|
||||
|
||||
var priv interface{}
|
||||
var err error
|
||||
switch *ecdsaCurve {
|
||||
case "":
|
||||
priv, err = rsa.GenerateKey(rand.Reader, *rsaBits)
|
||||
case "P224":
|
||||
priv, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
|
||||
case "P256":
|
||||
priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
case "P384":
|
||||
priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
|
||||
case "P521":
|
||||
priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "Unrecognized elliptic curve: %q", *ecdsaCurve)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("failed to generate private key: %s", err)
|
||||
}
|
||||
|
||||
var notBefore time.Time
|
||||
if len(*validFrom) == 0 {
|
||||
notBefore = time.Now()
|
||||
} else {
|
||||
notBefore, err = time.Parse("Jan 2 15:04:05 2006", *validFrom)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to parse creation date: %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
notAfter := notBefore.Add(*validFor)
|
||||
|
||||
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to generate serial number: %s", err)
|
||||
}
|
||||
|
||||
template := x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{"Acme Co"},
|
||||
},
|
||||
NotBefore: notBefore,
|
||||
NotAfter: notAfter,
|
||||
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
hosts := strings.Split(*host, ",")
|
||||
for _, h := range hosts {
|
||||
if ip := net.ParseIP(h); ip != nil {
|
||||
template.IPAddresses = append(template.IPAddresses, ip)
|
||||
} else {
|
||||
template.DNSNames = append(template.DNSNames, h)
|
||||
}
|
||||
}
|
||||
|
||||
if *isCA {
|
||||
template.IsCA = true
|
||||
template.KeyUsage |= x509.KeyUsageCertSign
|
||||
}
|
||||
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create certificate: %s", err)
|
||||
}
|
||||
|
||||
certOut, err := os.Create("cert.pem")
|
||||
if err != nil {
|
||||
log.Fatalf("failed to open cert.pem for writing: %s", err)
|
||||
}
|
||||
if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
|
||||
log.Fatalf("failed to write data to cert.pem: %s", err)
|
||||
}
|
||||
if err := certOut.Close(); err != nil {
|
||||
log.Fatalf("error closing cert.pem: %s", err)
|
||||
}
|
||||
log.Print("wrote cert.pem\n")
|
||||
|
||||
keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
log.Print("failed to open key.pem for writing:", err)
|
||||
return
|
||||
}
|
||||
if err := pem.Encode(keyOut, pemBlockForKey(priv)); err != nil {
|
||||
log.Fatalf("failed to write data to key.pem: %s", err)
|
||||
}
|
||||
if err := keyOut.Close(); err != nil {
|
||||
log.Fatalf("error closing key.pem: %s", err)
|
||||
}
|
||||
log.Print("wrote key.pem\n")
|
||||
}
|
||||
144
vendor/github.com/miekg/dns/duplicate_generate.go
generated
vendored
Normal file
144
vendor/github.com/miekg/dns/duplicate_generate.go
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
//+build ignore
|
||||
|
||||
// types_generate.go is meant to run with go generate. It will use
|
||||
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||
// it will generate conversion tables (TypeToRR and TypeToString) and banal
|
||||
// methods (len, Header, copy) based on the struct tags. The generated source is
|
||||
// written to ztypes.go, and is meant to be checked into git.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"go/importer"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
var packageHdr = `
|
||||
// Code generated by "go run duplicate_generate.go"; DO NOT EDIT.
|
||||
|
||||
package dns
|
||||
|
||||
`
|
||||
|
||||
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||
st, ok := t.Underlying().(*types.Struct)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||
return st, false
|
||||
}
|
||||
if st.Field(0).Anonymous() {
|
||||
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||
return st, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Import and type-check the package
|
||||
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||
fatalIfErr(err)
|
||||
scope := pkg.Scope()
|
||||
|
||||
// Collect actual types (*X)
|
||||
var namedTypes []string
|
||||
for _, name := range scope.Names() {
|
||||
o := scope.Lookup(name)
|
||||
if o == nil || !o.Exported() {
|
||||
continue
|
||||
}
|
||||
|
||||
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if name == "PrivateRR" || name == "OPT" {
|
||||
continue
|
||||
}
|
||||
|
||||
namedTypes = append(namedTypes, o.Name())
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
b.WriteString(packageHdr)
|
||||
|
||||
// Generate the duplicate check for each type.
|
||||
fmt.Fprint(b, "// isDuplicate() functions\n\n")
|
||||
for _, name := range namedTypes {
|
||||
|
||||
o := scope.Lookup(name)
|
||||
st, isEmbedded := getTypeStruct(o.Type(), scope)
|
||||
if isEmbedded {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(b, "func (r1 *%s) isDuplicate(_r2 RR) bool {\n", name)
|
||||
fmt.Fprintf(b, "r2, ok := _r2.(*%s)\n", name)
|
||||
fmt.Fprint(b, "if !ok { return false }\n")
|
||||
fmt.Fprint(b, "_ = r2\n")
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
field := st.Field(i).Name()
|
||||
o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) }
|
||||
o3 := func(s string) { fmt.Fprintf(b, s+"\n", field, field, field) }
|
||||
|
||||
// For some reason, a and aaaa don't pop up as *types.Slice here (mostly like because the are
|
||||
// *indirectly* defined as a slice in the net package).
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}")
|
||||
|
||||
if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` {
|
||||
o3(`for i := 0; i < len(r1.%s); i++ {
|
||||
if !isDuplicateName(r1.%s[i], r2.%s[i]) {
|
||||
return false
|
||||
}
|
||||
}`)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
o3(`for i := 0; i < len(r1.%s); i++ {
|
||||
if r1.%s[i] != r2.%s[i] {
|
||||
return false
|
||||
}
|
||||
}`)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
switch st.Tag(i) {
|
||||
case `dns:"-"`:
|
||||
// ignored
|
||||
case `dns:"a"`, `dns:"aaaa"`:
|
||||
o2("if !r1.%s.Equal(r2.%s) {\nreturn false\n}")
|
||||
case `dns:"cdomain-name"`, `dns:"domain-name"`:
|
||||
o2("if !isDuplicateName(r1.%s, r2.%s) {\nreturn false\n}")
|
||||
default:
|
||||
o2("if r1.%s != r2.%s {\nreturn false\n}")
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(b, "return true\n}\n\n")
|
||||
}
|
||||
|
||||
// gofmt
|
||||
res, err := format.Source(b.Bytes())
|
||||
if err != nil {
|
||||
b.WriteTo(os.Stderr)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// write result
|
||||
f, err := os.Create("zduplicate.go")
|
||||
fatalIfErr(err)
|
||||
defer f.Close()
|
||||
f.Write(res)
|
||||
}
|
||||
|
||||
func fatalIfErr(err error) {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
328
vendor/github.com/miekg/dns/msg_generate.go
generated
vendored
Normal file
328
vendor/github.com/miekg/dns/msg_generate.go
generated
vendored
Normal file
@@ -0,0 +1,328 @@
|
||||
//+build ignore
|
||||
|
||||
// msg_generate.go is meant to run with go generate. It will use
|
||||
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||
// it will generate pack/unpack methods based on the struct tags. The generated source is
|
||||
// written to zmsg.go, and is meant to be checked into git.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"go/importer"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var packageHdr = `
|
||||
// Code generated by "go run msg_generate.go"; DO NOT EDIT.
|
||||
|
||||
package dns
|
||||
|
||||
`
|
||||
|
||||
// getTypeStruct will take a type and the package scope, and return the
|
||||
// (innermost) struct if the type is considered a RR type (currently defined as
|
||||
// those structs beginning with a RR_Header, could be redefined as implementing
|
||||
// the RR interface). The bool return value indicates if embedded structs were
|
||||
// resolved.
|
||||
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||
st, ok := t.Underlying().(*types.Struct)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||
return st, false
|
||||
}
|
||||
if st.Field(0).Anonymous() {
|
||||
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||
return st, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Import and type-check the package
|
||||
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||
fatalIfErr(err)
|
||||
scope := pkg.Scope()
|
||||
|
||||
// Collect actual types (*X)
|
||||
var namedTypes []string
|
||||
for _, name := range scope.Names() {
|
||||
o := scope.Lookup(name)
|
||||
if o == nil || !o.Exported() {
|
||||
continue
|
||||
}
|
||||
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
|
||||
continue
|
||||
}
|
||||
if name == "PrivateRR" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if corresponding TypeX exists
|
||||
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
|
||||
log.Fatalf("Constant Type%s does not exist.", o.Name())
|
||||
}
|
||||
|
||||
namedTypes = append(namedTypes, o.Name())
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
b.WriteString(packageHdr)
|
||||
|
||||
fmt.Fprint(b, "// pack*() functions\n\n")
|
||||
for _, name := range namedTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
|
||||
fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {\n", name)
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
o := func(s string) {
|
||||
fmt.Fprintf(b, s, st.Field(i).Name())
|
||||
fmt.Fprint(b, `if err != nil {
|
||||
return off, err
|
||||
}
|
||||
`)
|
||||
}
|
||||
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
switch st.Tag(i) {
|
||||
case `dns:"-"`: // ignored
|
||||
case `dns:"txt"`:
|
||||
o("off, err = packStringTxt(rr.%s, msg, off)\n")
|
||||
case `dns:"opt"`:
|
||||
o("off, err = packDataOpt(rr.%s, msg, off)\n")
|
||||
case `dns:"nsec"`:
|
||||
o("off, err = packDataNsec(rr.%s, msg, off)\n")
|
||||
case `dns:"domain-name"`:
|
||||
o("off, err = packDataDomainNames(rr.%s, msg, off, compression, false)\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"-"`: // ignored
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
o("off, err = packDomainName(rr.%s, msg, off, compression, compress)\n")
|
||||
case st.Tag(i) == `dns:"domain-name"`:
|
||||
o("off, err = packDomainName(rr.%s, msg, off, compression, false)\n")
|
||||
case st.Tag(i) == `dns:"a"`:
|
||||
o("off, err = packDataA(rr.%s, msg, off)\n")
|
||||
case st.Tag(i) == `dns:"aaaa"`:
|
||||
o("off, err = packDataAAAA(rr.%s, msg, off)\n")
|
||||
case st.Tag(i) == `dns:"uint48"`:
|
||||
o("off, err = packUint48(rr.%s, msg, off)\n")
|
||||
case st.Tag(i) == `dns:"txt"`:
|
||||
o("off, err = packString(rr.%s, msg, off)\n")
|
||||
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"base32"`:
|
||||
o("off, err = packStringBase32(rr.%s, msg, off)\n")
|
||||
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"base64"`:
|
||||
o("off, err = packStringBase64(rr.%s, msg, off)\n")
|
||||
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`):
|
||||
// directly write instead of using o() so we get the error check in the correct place
|
||||
field := st.Field(i).Name()
|
||||
fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty
|
||||
if rr.%s != "-" {
|
||||
off, err = packStringHex(rr.%s, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
}
|
||||
`, field, field)
|
||||
continue
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"hex"`:
|
||||
o("off, err = packStringHex(rr.%s, msg, off)\n")
|
||||
case st.Tag(i) == `dns:"any"`:
|
||||
o("off, err = packStringAny(rr.%s, msg, off)\n")
|
||||
case st.Tag(i) == `dns:"octet"`:
|
||||
o("off, err = packStringOctet(rr.%s, msg, off)\n")
|
||||
case st.Tag(i) == "":
|
||||
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||
case types.Uint8:
|
||||
o("off, err = packUint8(rr.%s, msg, off)\n")
|
||||
case types.Uint16:
|
||||
o("off, err = packUint16(rr.%s, msg, off)\n")
|
||||
case types.Uint32:
|
||||
o("off, err = packUint32(rr.%s, msg, off)\n")
|
||||
case types.Uint64:
|
||||
o("off, err = packUint64(rr.%s, msg, off)\n")
|
||||
case types.String:
|
||||
o("off, err = packString(rr.%s, msg, off)\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name())
|
||||
}
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(b, "return off, nil }\n")
|
||||
}
|
||||
|
||||
fmt.Fprint(b, "// unpack*() functions\n\n")
|
||||
for _, name := range namedTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
|
||||
fmt.Fprintf(b, "func (rr *%s) unpack(msg []byte, off int) (off1 int, err error) {\n", name)
|
||||
fmt.Fprint(b, `rdStart := off
|
||||
_ = rdStart
|
||||
|
||||
`)
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
o := func(s string) {
|
||||
fmt.Fprintf(b, s, st.Field(i).Name())
|
||||
fmt.Fprint(b, `if err != nil {
|
||||
return off, err
|
||||
}
|
||||
`)
|
||||
}
|
||||
|
||||
// size-* are special, because they reference a struct member we should use for the length.
|
||||
if strings.HasPrefix(st.Tag(i), `dns:"size-`) {
|
||||
structMember := structMember(st.Tag(i))
|
||||
structTag := structTag(st.Tag(i))
|
||||
switch structTag {
|
||||
case "hex":
|
||||
fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||
case "base32":
|
||||
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||
case "base64":
|
||||
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
fmt.Fprint(b, `if err != nil {
|
||||
return off, err
|
||||
}
|
||||
`)
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
switch st.Tag(i) {
|
||||
case `dns:"-"`: // ignored
|
||||
case `dns:"txt"`:
|
||||
o("rr.%s, off, err = unpackStringTxt(msg, off)\n")
|
||||
case `dns:"opt"`:
|
||||
o("rr.%s, off, err = unpackDataOpt(msg, off)\n")
|
||||
case `dns:"nsec"`:
|
||||
o("rr.%s, off, err = unpackDataNsec(msg, off)\n")
|
||||
case `dns:"domain-name"`:
|
||||
o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch st.Tag(i) {
|
||||
case `dns:"-"`: // ignored
|
||||
case `dns:"cdomain-name"`:
|
||||
fallthrough
|
||||
case `dns:"domain-name"`:
|
||||
o("rr.%s, off, err = UnpackDomainName(msg, off)\n")
|
||||
case `dns:"a"`:
|
||||
o("rr.%s, off, err = unpackDataA(msg, off)\n")
|
||||
case `dns:"aaaa"`:
|
||||
o("rr.%s, off, err = unpackDataAAAA(msg, off)\n")
|
||||
case `dns:"uint48"`:
|
||||
o("rr.%s, off, err = unpackUint48(msg, off)\n")
|
||||
case `dns:"txt"`:
|
||||
o("rr.%s, off, err = unpackString(msg, off)\n")
|
||||
case `dns:"base32"`:
|
||||
o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||
case `dns:"base64"`:
|
||||
o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||
case `dns:"hex"`:
|
||||
o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||
case `dns:"any"`:
|
||||
o("rr.%s, off, err = unpackStringAny(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||
case `dns:"octet"`:
|
||||
o("rr.%s, off, err = unpackStringOctet(msg, off)\n")
|
||||
case "":
|
||||
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||
case types.Uint8:
|
||||
o("rr.%s, off, err = unpackUint8(msg, off)\n")
|
||||
case types.Uint16:
|
||||
o("rr.%s, off, err = unpackUint16(msg, off)\n")
|
||||
case types.Uint32:
|
||||
o("rr.%s, off, err = unpackUint32(msg, off)\n")
|
||||
case types.Uint64:
|
||||
o("rr.%s, off, err = unpackUint64(msg, off)\n")
|
||||
case types.String:
|
||||
o("rr.%s, off, err = unpackString(msg, off)\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name())
|
||||
}
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
// If we've hit len(msg) we return without error.
|
||||
if i < st.NumFields()-1 {
|
||||
fmt.Fprintf(b, `if off == len(msg) {
|
||||
return off, nil
|
||||
}
|
||||
`)
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(b, "return off, nil }\n\n")
|
||||
}
|
||||
|
||||
// gofmt
|
||||
res, err := format.Source(b.Bytes())
|
||||
if err != nil {
|
||||
b.WriteTo(os.Stderr)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// write result
|
||||
f, err := os.Create("zmsg.go")
|
||||
fatalIfErr(err)
|
||||
defer f.Close()
|
||||
f.Write(res)
|
||||
}
|
||||
|
||||
// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string.
|
||||
func structMember(s string) string {
|
||||
fields := strings.Split(s, ":")
|
||||
if len(fields) == 0 {
|
||||
return ""
|
||||
}
|
||||
f := fields[len(fields)-1]
|
||||
// f should have a closing "
|
||||
if len(f) > 1 {
|
||||
return f[:len(f)-1]
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// structTag will take a tag like dns:"size-base32:SaltLength" and return base32.
|
||||
func structTag(s string) string {
|
||||
fields := strings.Split(s, ":")
|
||||
if len(fields) < 2 {
|
||||
return ""
|
||||
}
|
||||
return fields[1][len("\"size-"):]
|
||||
}
|
||||
|
||||
func fatalIfErr(err error) {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
287
vendor/github.com/miekg/dns/types_generate.go
generated
vendored
Normal file
287
vendor/github.com/miekg/dns/types_generate.go
generated
vendored
Normal file
@@ -0,0 +1,287 @@
|
||||
//+build ignore
|
||||
|
||||
// types_generate.go is meant to run with go generate. It will use
|
||||
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||
// it will generate conversion tables (TypeToRR and TypeToString) and banal
|
||||
// methods (len, Header, copy) based on the struct tags. The generated source is
|
||||
// written to ztypes.go, and is meant to be checked into git.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"go/importer"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
var skipLen = map[string]struct{}{
|
||||
"NSEC": {},
|
||||
"NSEC3": {},
|
||||
"OPT": {},
|
||||
"CSYNC": {},
|
||||
}
|
||||
|
||||
var packageHdr = `
|
||||
// Code generated by "go run types_generate.go"; DO NOT EDIT.
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"net"
|
||||
)
|
||||
|
||||
`
|
||||
|
||||
var TypeToRR = template.Must(template.New("TypeToRR").Parse(`
|
||||
// TypeToRR is a map of constructors for each RR type.
|
||||
var TypeToRR = map[uint16]func() RR{
|
||||
{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) },
|
||||
{{end}}{{end}} }
|
||||
|
||||
`))
|
||||
|
||||
var typeToString = template.Must(template.New("typeToString").Parse(`
|
||||
// TypeToString is a map of strings for each RR type.
|
||||
var TypeToString = map[uint16]string{
|
||||
{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}",
|
||||
{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR",
|
||||
}
|
||||
|
||||
`))
|
||||
|
||||
var headerFunc = template.Must(template.New("headerFunc").Parse(`
|
||||
{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr }
|
||||
{{end}}
|
||||
|
||||
`))
|
||||
|
||||
// getTypeStruct will take a type and the package scope, and return the
|
||||
// (innermost) struct if the type is considered a RR type (currently defined as
|
||||
// those structs beginning with a RR_Header, could be redefined as implementing
|
||||
// the RR interface). The bool return value indicates if embedded structs were
|
||||
// resolved.
|
||||
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||
st, ok := t.Underlying().(*types.Struct)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||
return st, false
|
||||
}
|
||||
if st.Field(0).Anonymous() {
|
||||
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||
return st, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Import and type-check the package
|
||||
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||
fatalIfErr(err)
|
||||
scope := pkg.Scope()
|
||||
|
||||
// Collect constants like TypeX
|
||||
var numberedTypes []string
|
||||
for _, name := range scope.Names() {
|
||||
o := scope.Lookup(name)
|
||||
if o == nil || !o.Exported() {
|
||||
continue
|
||||
}
|
||||
b, ok := o.Type().(*types.Basic)
|
||||
if !ok || b.Kind() != types.Uint16 {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(o.Name(), "Type") {
|
||||
continue
|
||||
}
|
||||
name := strings.TrimPrefix(o.Name(), "Type")
|
||||
if name == "PrivateRR" {
|
||||
continue
|
||||
}
|
||||
numberedTypes = append(numberedTypes, name)
|
||||
}
|
||||
|
||||
// Collect actual types (*X)
|
||||
var namedTypes []string
|
||||
for _, name := range scope.Names() {
|
||||
o := scope.Lookup(name)
|
||||
if o == nil || !o.Exported() {
|
||||
continue
|
||||
}
|
||||
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
|
||||
continue
|
||||
}
|
||||
if name == "PrivateRR" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if corresponding TypeX exists
|
||||
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
|
||||
log.Fatalf("Constant Type%s does not exist.", o.Name())
|
||||
}
|
||||
|
||||
namedTypes = append(namedTypes, o.Name())
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
b.WriteString(packageHdr)
|
||||
|
||||
// Generate TypeToRR
|
||||
fatalIfErr(TypeToRR.Execute(b, namedTypes))
|
||||
|
||||
// Generate typeToString
|
||||
fatalIfErr(typeToString.Execute(b, numberedTypes))
|
||||
|
||||
// Generate headerFunc
|
||||
fatalIfErr(headerFunc.Execute(b, namedTypes))
|
||||
|
||||
// Generate len()
|
||||
fmt.Fprint(b, "// len() functions\n")
|
||||
for _, name := range namedTypes {
|
||||
if _, ok := skipLen[name]; ok {
|
||||
continue
|
||||
}
|
||||
o := scope.Lookup(name)
|
||||
st, isEmbedded := getTypeStruct(o.Type(), scope)
|
||||
if isEmbedded {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(b, "func (rr *%s) len(off int, compression map[string]struct{}) int {\n", name)
|
||||
fmt.Fprintf(b, "l := rr.Hdr.len(off, compression)\n")
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) }
|
||||
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
switch st.Tag(i) {
|
||||
case `dns:"-"`:
|
||||
// ignored
|
||||
case `dns:"cdomain-name"`:
|
||||
o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, true) }\n")
|
||||
case `dns:"domain-name"`:
|
||||
o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, false) }\n")
|
||||
case `dns:"txt"`:
|
||||
o("for _, x := range rr.%s { l += len(x) + 1 }\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"-"`:
|
||||
// ignored
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
o("l += domainNameLen(rr.%s, off+l, compression, true)\n")
|
||||
case st.Tag(i) == `dns:"domain-name"`:
|
||||
o("l += domainNameLen(rr.%s, off+l, compression, false)\n")
|
||||
case st.Tag(i) == `dns:"octet"`:
|
||||
o("l += len(rr.%s)\n")
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`):
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"base64"`:
|
||||
o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored
|
||||
o("l += len(rr.%s)/2\n")
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`):
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"hex"`:
|
||||
o("l += len(rr.%s)/2 + 1\n")
|
||||
case st.Tag(i) == `dns:"any"`:
|
||||
o("l += len(rr.%s)\n")
|
||||
case st.Tag(i) == `dns:"a"`:
|
||||
o("if len(rr.%s) != 0 { l += net.IPv4len }\n")
|
||||
case st.Tag(i) == `dns:"aaaa"`:
|
||||
o("if len(rr.%s) != 0 { l += net.IPv6len }\n")
|
||||
case st.Tag(i) == `dns:"txt"`:
|
||||
o("for _, t := range rr.%s { l += len(t) + 1 }\n")
|
||||
case st.Tag(i) == `dns:"uint48"`:
|
||||
o("l += 6 // %s\n")
|
||||
case st.Tag(i) == "":
|
||||
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||
case types.Uint8:
|
||||
o("l++ // %s\n")
|
||||
case types.Uint16:
|
||||
o("l += 2 // %s\n")
|
||||
case types.Uint32:
|
||||
o("l += 4 // %s\n")
|
||||
case types.Uint64:
|
||||
o("l += 8 // %s\n")
|
||||
case types.String:
|
||||
o("l += len(rr.%s) + 1\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name())
|
||||
}
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(b, "return l }\n")
|
||||
}
|
||||
|
||||
// Generate copy()
|
||||
fmt.Fprint(b, "// copy() functions\n")
|
||||
for _, name := range namedTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, isEmbedded := getTypeStruct(o.Type(), scope)
|
||||
if isEmbedded {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name)
|
||||
fields := []string{"rr.Hdr"}
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
f := st.Field(i).Name()
|
||||
if sl, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
t := sl.Underlying().String()
|
||||
t = strings.TrimPrefix(t, "[]")
|
||||
if strings.Contains(t, ".") {
|
||||
splits := strings.Split(t, ".")
|
||||
t = splits[len(splits)-1]
|
||||
}
|
||||
// For the EDNS0 interface (used in the OPT RR), we need to call the copy method on each element.
|
||||
if t == "EDNS0" {
|
||||
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s));\nfor i,e := range rr.%s {\n %s[i] = e.copy()\n}\n",
|
||||
f, t, f, f, f)
|
||||
fields = append(fields, f)
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n",
|
||||
f, t, f, f, f)
|
||||
fields = append(fields, f)
|
||||
continue
|
||||
}
|
||||
if st.Field(i).Type().String() == "net.IP" {
|
||||
fields = append(fields, "copyIP(rr."+f+")")
|
||||
continue
|
||||
}
|
||||
fields = append(fields, "rr."+f)
|
||||
}
|
||||
fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ","))
|
||||
fmt.Fprintf(b, "}\n")
|
||||
}
|
||||
|
||||
// gofmt
|
||||
res, err := format.Source(b.Bytes())
|
||||
if err != nil {
|
||||
b.WriteTo(os.Stderr)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// write result
|
||||
f, err := os.Create("ztypes.go")
|
||||
fatalIfErr(err)
|
||||
defer f.Close()
|
||||
f.Write(res)
|
||||
}
|
||||
|
||||
func fatalIfErr(err error) {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
202
vendor/github.com/open-policy-agent/opa/LICENSE
generated
vendored
Normal file
202
vendor/github.com/open-policy-agent/opa/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
1902
vendor/github.com/open-policy-agent/opa/ast/builtins.go
generated
vendored
Normal file
1902
vendor/github.com/open-policy-agent/opa/ast/builtins.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
983
vendor/github.com/open-policy-agent/opa/ast/check.go
generated
vendored
Normal file
983
vendor/github.com/open-policy-agent/opa/ast/check.go
generated
vendored
Normal file
@@ -0,0 +1,983 @@
|
||||
// Copyright 2017 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/open-policy-agent/opa/types"
|
||||
"github.com/open-policy-agent/opa/util"
|
||||
)
|
||||
|
||||
type rewriteVars func(x Ref) Ref
|
||||
|
||||
// exprChecker defines the interface for executing type checking on a single
|
||||
// expression. The exprChecker must update the provided TypeEnv with inferred
|
||||
// types of vars.
|
||||
type exprChecker func(*TypeEnv, *Expr) *Error
|
||||
|
||||
// typeChecker implements type checking on queries and rules. Errors are
|
||||
// accumulated on the typeChecker so that a single run can report multiple
|
||||
// issues.
|
||||
type typeChecker struct {
|
||||
errs Errors
|
||||
exprCheckers map[string]exprChecker
|
||||
varRewriter rewriteVars
|
||||
}
|
||||
|
||||
// newTypeChecker returns a new typeChecker object that has no errors.
|
||||
func newTypeChecker() *typeChecker {
|
||||
tc := &typeChecker{}
|
||||
tc.exprCheckers = map[string]exprChecker{
|
||||
"eq": tc.checkExprEq,
|
||||
}
|
||||
return tc
|
||||
}
|
||||
|
||||
func (tc *typeChecker) WithVarRewriter(f rewriteVars) *typeChecker {
|
||||
tc.varRewriter = f
|
||||
return tc
|
||||
}
|
||||
|
||||
// CheckBody runs type checking on the body and returns a TypeEnv if no errors
|
||||
// are found. The resulting TypeEnv wraps the provided one. The resulting
|
||||
// TypeEnv will be able to resolve types of vars contained in the body.
|
||||
func (tc *typeChecker) CheckBody(env *TypeEnv, body Body) (*TypeEnv, Errors) {
|
||||
|
||||
if env == nil {
|
||||
env = NewTypeEnv()
|
||||
} else {
|
||||
env = env.wrap()
|
||||
}
|
||||
|
||||
WalkExprs(body, func(expr *Expr) bool {
|
||||
|
||||
closureErrs := tc.checkClosures(env, expr)
|
||||
for _, err := range closureErrs {
|
||||
tc.err(err)
|
||||
}
|
||||
|
||||
hasClosureErrors := len(closureErrs) > 0
|
||||
|
||||
vis := newRefChecker(env, tc.varRewriter)
|
||||
NewGenericVisitor(vis.Visit).Walk(expr)
|
||||
for _, err := range vis.errs {
|
||||
tc.err(err)
|
||||
}
|
||||
|
||||
hasRefErrors := len(vis.errs) > 0
|
||||
|
||||
if err := tc.checkExpr(env, expr); err != nil {
|
||||
// Suppress this error if a more actionable one has occurred. In
|
||||
// this case, if an error occurred in a ref or closure contained in
|
||||
// this expression, and the error is due to a nil type, then it's
|
||||
// likely to be the result of the more specific error.
|
||||
skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err)
|
||||
if !skip {
|
||||
tc.err(err)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return env, tc.errs
|
||||
}
|
||||
|
||||
// CheckTypes runs type checking on the rules returns a TypeEnv if no errors
|
||||
// are found. The resulting TypeEnv wraps the provided one. The resulting
|
||||
// TypeEnv will be able to resolve types of refs that refer to rules.
|
||||
func (tc *typeChecker) CheckTypes(env *TypeEnv, sorted []util.T) (*TypeEnv, Errors) {
|
||||
if env == nil {
|
||||
env = NewTypeEnv()
|
||||
} else {
|
||||
env = env.wrap()
|
||||
}
|
||||
for _, s := range sorted {
|
||||
tc.checkRule(env, s.(*Rule))
|
||||
}
|
||||
tc.errs.Sort()
|
||||
return env, tc.errs
|
||||
}
|
||||
|
||||
func (tc *typeChecker) checkClosures(env *TypeEnv, expr *Expr) Errors {
|
||||
var result Errors
|
||||
WalkClosures(expr, func(x interface{}) bool {
|
||||
switch x := x.(type) {
|
||||
case *ArrayComprehension:
|
||||
_, errs := newTypeChecker().WithVarRewriter(tc.varRewriter).CheckBody(env, x.Body)
|
||||
if len(errs) > 0 {
|
||||
result = errs
|
||||
return true
|
||||
}
|
||||
case *SetComprehension:
|
||||
_, errs := newTypeChecker().WithVarRewriter(tc.varRewriter).CheckBody(env, x.Body)
|
||||
if len(errs) > 0 {
|
||||
result = errs
|
||||
return true
|
||||
}
|
||||
case *ObjectComprehension:
|
||||
_, errs := newTypeChecker().WithVarRewriter(tc.varRewriter).CheckBody(env, x.Body)
|
||||
if len(errs) > 0 {
|
||||
result = errs
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
func (tc *typeChecker) checkLanguageBuiltins(env *TypeEnv, builtins map[string]*Builtin) *TypeEnv {
|
||||
if env == nil {
|
||||
env = NewTypeEnv()
|
||||
} else {
|
||||
env = env.wrap()
|
||||
}
|
||||
for _, bi := range builtins {
|
||||
env.tree.Put(bi.Ref(), bi.Decl)
|
||||
}
|
||||
return env
|
||||
}
|
||||
|
||||
func (tc *typeChecker) checkRule(env *TypeEnv, rule *Rule) {
|
||||
|
||||
cpy, err := tc.CheckBody(env, rule.Body)
|
||||
|
||||
if len(err) == 0 {
|
||||
|
||||
path := rule.Path()
|
||||
var tpe types.Type
|
||||
|
||||
if len(rule.Head.Args) > 0 {
|
||||
|
||||
// If args are not referred to in body, infer as any.
|
||||
WalkVars(rule.Head.Args, func(v Var) bool {
|
||||
if cpy.Get(v) == nil {
|
||||
cpy.tree.PutOne(v, types.A)
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
// Construct function type.
|
||||
args := make([]types.Type, len(rule.Head.Args))
|
||||
for i := 0; i < len(rule.Head.Args); i++ {
|
||||
args[i] = cpy.Get(rule.Head.Args[i])
|
||||
}
|
||||
|
||||
f := types.NewFunction(args, cpy.Get(rule.Head.Value))
|
||||
|
||||
// Union with existing.
|
||||
exist := env.tree.Get(path)
|
||||
tpe = types.Or(exist, f)
|
||||
|
||||
} else {
|
||||
switch rule.Head.DocKind() {
|
||||
case CompleteDoc:
|
||||
typeV := cpy.Get(rule.Head.Value)
|
||||
if typeV != nil {
|
||||
exist := env.tree.Get(path)
|
||||
tpe = types.Or(typeV, exist)
|
||||
}
|
||||
case PartialObjectDoc:
|
||||
typeK := cpy.Get(rule.Head.Key)
|
||||
typeV := cpy.Get(rule.Head.Value)
|
||||
if typeK != nil && typeV != nil {
|
||||
exist := env.tree.Get(path)
|
||||
typeV = types.Or(types.Values(exist), typeV)
|
||||
typeK = types.Or(types.Keys(exist), typeK)
|
||||
tpe = types.NewObject(nil, types.NewDynamicProperty(typeK, typeV))
|
||||
}
|
||||
case PartialSetDoc:
|
||||
typeK := cpy.Get(rule.Head.Key)
|
||||
if typeK != nil {
|
||||
exist := env.tree.Get(path)
|
||||
typeK = types.Or(types.Keys(exist), typeK)
|
||||
tpe = types.NewSet(typeK)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if tpe != nil {
|
||||
env.tree.Put(path, tpe)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (tc *typeChecker) checkExpr(env *TypeEnv, expr *Expr) *Error {
|
||||
if !expr.IsCall() {
|
||||
return nil
|
||||
}
|
||||
|
||||
checker := tc.exprCheckers[expr.Operator().String()]
|
||||
if checker != nil {
|
||||
return checker(env, expr)
|
||||
}
|
||||
|
||||
return tc.checkExprBuiltin(env, expr)
|
||||
}
|
||||
|
||||
func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error {
|
||||
|
||||
args := expr.Operands()
|
||||
pre := getArgTypes(env, args)
|
||||
|
||||
// NOTE(tsandall): undefined functions will have been caught earlier in the
|
||||
// compiler. We check for undefined functions before the safety check so
|
||||
// that references to non-existent functions result in undefined function
|
||||
// errors as opposed to unsafe var errors.
|
||||
//
|
||||
// We cannot run type checking before the safety check because part of the
|
||||
// type checker relies on reordering (in particular for references to local
|
||||
// vars).
|
||||
name := expr.Operator()
|
||||
tpe := env.Get(name)
|
||||
|
||||
if tpe == nil {
|
||||
return NewError(TypeErr, expr.Location, "undefined function %v", name)
|
||||
}
|
||||
|
||||
ftpe, ok := tpe.(*types.Function)
|
||||
if !ok {
|
||||
return NewError(TypeErr, expr.Location, "undefined function %v", name)
|
||||
}
|
||||
|
||||
maxArgs := len(ftpe.Args())
|
||||
expArgs := ftpe.Args()
|
||||
|
||||
if ftpe.Result() != nil {
|
||||
maxArgs++
|
||||
expArgs = append(expArgs, ftpe.Result())
|
||||
}
|
||||
|
||||
if len(args) > maxArgs {
|
||||
return newArgError(expr.Location, name, "too many arguments", pre, expArgs)
|
||||
} else if len(args) < len(ftpe.Args()) {
|
||||
return newArgError(expr.Location, name, "too few arguments", pre, expArgs)
|
||||
}
|
||||
|
||||
for i := range args {
|
||||
if !unify1(env, args[i], expArgs[i], false) {
|
||||
post := make([]types.Type, len(args))
|
||||
for i := range args {
|
||||
post[i] = env.Get(args[i])
|
||||
}
|
||||
return newArgError(expr.Location, name, "invalid argument(s)", post, expArgs)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *typeChecker) checkExprEq(env *TypeEnv, expr *Expr) *Error {
|
||||
|
||||
pre := getArgTypes(env, expr.Operands())
|
||||
exp := Equality.Decl.Args()
|
||||
|
||||
if len(pre) < len(exp) {
|
||||
return newArgError(expr.Location, expr.Operator(), "too few arguments", pre, exp)
|
||||
} else if len(exp) < len(pre) {
|
||||
return newArgError(expr.Location, expr.Operator(), "too many arguments", pre, exp)
|
||||
}
|
||||
|
||||
a, b := expr.Operand(0), expr.Operand(1)
|
||||
typeA, typeB := env.Get(a), env.Get(b)
|
||||
|
||||
if !unify2(env, a, typeA, b, typeB) {
|
||||
err := NewError(TypeErr, expr.Location, "match error")
|
||||
err.Details = &UnificationErrDetail{
|
||||
Left: typeA,
|
||||
Right: typeB,
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool {
|
||||
|
||||
nilA := types.Nil(typeA)
|
||||
nilB := types.Nil(typeB)
|
||||
|
||||
if nilA && !nilB {
|
||||
return unify1(env, a, typeB, false)
|
||||
} else if nilB && !nilA {
|
||||
return unify1(env, b, typeA, false)
|
||||
} else if !nilA && !nilB {
|
||||
return unifies(typeA, typeB)
|
||||
}
|
||||
|
||||
switch a.Value.(type) {
|
||||
case Array:
|
||||
return unify2Array(env, a, typeA, b, typeB)
|
||||
case Object:
|
||||
return unify2Object(env, a, typeA, b, typeB)
|
||||
case Var:
|
||||
switch b.Value.(type) {
|
||||
case Var:
|
||||
return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
|
||||
case Array:
|
||||
return unify2Array(env, b, typeB, a, typeA)
|
||||
case Object:
|
||||
return unify2Object(env, b, typeB, a, typeA)
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func unify2Array(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool {
|
||||
arr := a.Value.(Array)
|
||||
switch bv := b.Value.(type) {
|
||||
case Array:
|
||||
if len(arr) == len(bv) {
|
||||
for i := range arr {
|
||||
if !unify2(env, arr[i], env.Get(arr[i]), bv[i], env.Get(bv[i])) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
case Var:
|
||||
return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func unify2Object(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool {
|
||||
obj := a.Value.(Object)
|
||||
switch bv := b.Value.(type) {
|
||||
case Object:
|
||||
cv := obj.Intersect(bv)
|
||||
if obj.Len() == bv.Len() && bv.Len() == len(cv) {
|
||||
for i := range cv {
|
||||
if !unify2(env, cv[i][1], env.Get(cv[i][1]), cv[i][2], env.Get(cv[i][2])) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
case Var:
|
||||
return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool {
|
||||
switch v := term.Value.(type) {
|
||||
case Array:
|
||||
switch tpe := tpe.(type) {
|
||||
case *types.Array:
|
||||
return unify1Array(env, v, tpe, union)
|
||||
case types.Any:
|
||||
if types.Compare(tpe, types.A) == 0 {
|
||||
for i := range v {
|
||||
unify1(env, v[i], types.A, true)
|
||||
}
|
||||
return true
|
||||
}
|
||||
unifies := false
|
||||
for i := range tpe {
|
||||
unifies = unify1(env, term, tpe[i], true) || unifies
|
||||
}
|
||||
return unifies
|
||||
}
|
||||
return false
|
||||
case Object:
|
||||
switch tpe := tpe.(type) {
|
||||
case *types.Object:
|
||||
return unify1Object(env, v, tpe, union)
|
||||
case types.Any:
|
||||
if types.Compare(tpe, types.A) == 0 {
|
||||
v.Foreach(func(key, value *Term) {
|
||||
unify1(env, key, types.A, true)
|
||||
unify1(env, value, types.A, true)
|
||||
})
|
||||
return true
|
||||
}
|
||||
unifies := false
|
||||
for i := range tpe {
|
||||
unifies = unify1(env, term, tpe[i], true) || unifies
|
||||
}
|
||||
return unifies
|
||||
}
|
||||
return false
|
||||
case Set:
|
||||
switch tpe := tpe.(type) {
|
||||
case *types.Set:
|
||||
return unify1Set(env, v, tpe, union)
|
||||
case types.Any:
|
||||
if types.Compare(tpe, types.A) == 0 {
|
||||
v.Foreach(func(elem *Term) {
|
||||
unify1(env, elem, types.A, true)
|
||||
})
|
||||
return true
|
||||
}
|
||||
unifies := false
|
||||
for i := range tpe {
|
||||
unifies = unify1(env, term, tpe[i], true) || unifies
|
||||
}
|
||||
return unifies
|
||||
}
|
||||
return false
|
||||
case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension:
|
||||
return unifies(env.Get(v), tpe)
|
||||
case Var:
|
||||
if !union {
|
||||
if exist := env.Get(v); exist != nil {
|
||||
return unifies(exist, tpe)
|
||||
}
|
||||
env.tree.PutOne(term.Value, tpe)
|
||||
} else {
|
||||
env.tree.PutOne(term.Value, types.Or(env.Get(v), tpe))
|
||||
}
|
||||
return true
|
||||
default:
|
||||
if !IsConstant(v) {
|
||||
panic("unreachable")
|
||||
}
|
||||
return unifies(env.Get(term), tpe)
|
||||
}
|
||||
}
|
||||
|
||||
func unify1Array(env *TypeEnv, val Array, tpe *types.Array, union bool) bool {
|
||||
if len(val) != tpe.Len() && tpe.Dynamic() == nil {
|
||||
return false
|
||||
}
|
||||
for i := range val {
|
||||
if !unify1(env, val[i], tpe.Select(i), union) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func unify1Object(env *TypeEnv, val Object, tpe *types.Object, union bool) bool {
|
||||
if val.Len() != len(tpe.Keys()) && tpe.DynamicValue() == nil {
|
||||
return false
|
||||
}
|
||||
stop := val.Until(func(k, v *Term) bool {
|
||||
if IsConstant(k.Value) {
|
||||
if child := selectConstant(tpe, k); child != nil {
|
||||
if !unify1(env, v, child, union) {
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
// Inferring type of value under dynamic key would involve unioning
|
||||
// with all property values of tpe whose keys unify. For now, type
|
||||
// these values as Any. We can investigate stricter inference in
|
||||
// the future.
|
||||
unify1(env, v, types.A, union)
|
||||
}
|
||||
return false
|
||||
})
|
||||
return !stop
|
||||
}
|
||||
|
||||
func unify1Set(env *TypeEnv, val Set, tpe *types.Set, union bool) bool {
|
||||
of := types.Values(tpe)
|
||||
return !val.Until(func(elem *Term) bool {
|
||||
return !unify1(env, elem, of, union)
|
||||
})
|
||||
}
|
||||
|
||||
func (tc *typeChecker) err(err *Error) {
|
||||
tc.errs = append(tc.errs, err)
|
||||
}
|
||||
|
||||
type refChecker struct {
|
||||
env *TypeEnv
|
||||
errs Errors
|
||||
varRewriter rewriteVars
|
||||
}
|
||||
|
||||
func newRefChecker(env *TypeEnv, f rewriteVars) *refChecker {
|
||||
|
||||
if f == nil {
|
||||
f = rewriteVarsNop
|
||||
}
|
||||
|
||||
return &refChecker{
|
||||
env: env,
|
||||
errs: nil,
|
||||
varRewriter: f,
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *refChecker) Visit(x interface{}) bool {
|
||||
switch x := x.(type) {
|
||||
case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
|
||||
return true
|
||||
case *Expr:
|
||||
switch terms := x.Terms.(type) {
|
||||
case []*Term:
|
||||
for i := 1; i < len(terms); i++ {
|
||||
NewGenericVisitor(rc.Visit).Walk(terms[i])
|
||||
}
|
||||
return true
|
||||
case *Term:
|
||||
NewGenericVisitor(rc.Visit).Walk(terms)
|
||||
return true
|
||||
}
|
||||
case Ref:
|
||||
if err := rc.checkApply(rc.env, x); err != nil {
|
||||
rc.errs = append(rc.errs, err)
|
||||
return true
|
||||
}
|
||||
if err := rc.checkRef(rc.env, rc.env.tree, x, 0); err != nil {
|
||||
rc.errs = append(rc.errs, err)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error {
|
||||
if tpe := curr.Get(ref); tpe != nil {
|
||||
if _, ok := tpe.(*types.Function); ok {
|
||||
return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx int) *Error {
|
||||
|
||||
if idx == len(ref) {
|
||||
return nil
|
||||
}
|
||||
|
||||
head := ref[idx]
|
||||
|
||||
// Handle constant ref operands, i.e., strings or the ref head.
|
||||
if _, ok := head.Value.(String); ok || idx == 0 {
|
||||
|
||||
child := node.Child(head.Value)
|
||||
if child == nil {
|
||||
|
||||
if curr.next != nil {
|
||||
next := curr.next
|
||||
return rc.checkRef(next, next.tree, ref, 0)
|
||||
}
|
||||
|
||||
if RootDocumentNames.Contains(ref[0]) {
|
||||
return rc.checkRefLeaf(types.A, ref, 1)
|
||||
}
|
||||
|
||||
return rc.checkRefLeaf(types.A, ref, 0)
|
||||
}
|
||||
|
||||
if child.Leaf() {
|
||||
return rc.checkRefLeaf(child.Value(), ref, idx+1)
|
||||
}
|
||||
|
||||
return rc.checkRef(curr, child, ref, idx+1)
|
||||
}
|
||||
|
||||
// Handle dynamic ref operands.
|
||||
switch value := head.Value.(type) {
|
||||
|
||||
case Var:
|
||||
|
||||
if exist := rc.env.Get(value); exist != nil {
|
||||
if !unifies(types.S, exist) {
|
||||
return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, types.S, getOneOfForNode(node))
|
||||
}
|
||||
} else {
|
||||
rc.env.tree.PutOne(value, types.S)
|
||||
}
|
||||
|
||||
case Ref:
|
||||
|
||||
exist := rc.env.Get(value)
|
||||
if exist == nil {
|
||||
// If ref type is unknown, an error will already be reported so
|
||||
// stop here.
|
||||
return nil
|
||||
}
|
||||
|
||||
if !unifies(types.S, exist) {
|
||||
return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, types.S, getOneOfForNode(node))
|
||||
}
|
||||
|
||||
// Catch other ref operand types here. Non-leaf nodes must be referred to
|
||||
// with string values.
|
||||
default:
|
||||
return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.S, getOneOfForNode(node))
|
||||
}
|
||||
|
||||
// Run checking on remaining portion of the ref. Note, since the ref
|
||||
// potentially refers to data for which no type information exists,
|
||||
// checking should never fail.
|
||||
node.Children().Iter(func(_, child util.T) bool {
|
||||
rc.checkRef(curr, child.(*typeTreeNode), ref, idx+1)
|
||||
return false
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkRefLeaf type-checks the remainder of ref (starting at idx) against
// tpe, the type inferred for the value reached so far. It returns a type
// error if an operand cannot possibly select a value from tpe.
func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error {

	if idx == len(ref) {
		return nil
	}

	head := ref[idx]

	// Types with no key set (e.g., scalars) cannot be dereferenced further.
	keys := types.Keys(tpe)
	if keys == nil {
		return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), idx-1, tpe)
	}

	switch value := head.Value.(type) {

	case Var:
		// A bound var must unify with the key type; an unbound var is bound
		// to the key type here.
		if exist := rc.env.Get(value); exist != nil {
			if !unifies(exist, keys) {
				return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe))
			}
		} else {
			rc.env.tree.PutOne(value, types.Keys(tpe))
		}

	case Ref:
		// Nested refs are only checked when their type is known; unknown
		// refs will have been reported elsewhere.
		if exist := rc.env.Get(value); exist != nil {
			if !unifies(exist, keys) {
				return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe))
			}
		}

	case Array, Object, Set:
		// Composite references operands may only be used with a set.
		if !unifies(tpe, types.NewSet(types.A)) {
			return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, tpe, types.NewSet(types.A), nil)
		}
		if !unify1(rc.env, head, keys, false) {
			return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, rc.env.Get(head), keys, nil)
		}

	default:
		// Constant operand: select the attribute type directly and recurse.
		child := selectConstant(tpe, head)
		if child == nil {
			return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.Keys(tpe), getOneOfForType(tpe))
		}
		return rc.checkRefLeaf(child, ref, idx+1)
	}

	// Non-constant operand: continue checking against the value type.
	return rc.checkRefLeaf(types.Values(tpe), ref, idx+1)
}
|
||||
|
||||
// unifies reports whether values of type a and type b could be equal. nil
// (unknown) types never unify. Any types unify if either side's member set
// admits the other; composite types unify recursively.
func unifies(a, b types.Type) bool {

	if a == nil || b == nil {
		return false
	}

	anyA, ok1 := a.(types.Any)
	if ok1 {
		if unifiesAny(anyA, b) {
			return true
		}
	}

	anyB, ok2 := b.(types.Any)
	if ok2 {
		if unifiesAny(anyB, a) {
			return true
		}
	}

	// If either side is an Any type and neither unified above, the types do
	// not unify; do not fall through to the concrete-type switch.
	if ok1 || ok2 {
		return false
	}

	switch a := a.(type) {
	case types.Null:
		_, ok := b.(types.Null)
		return ok
	case types.Boolean:
		_, ok := b.(types.Boolean)
		return ok
	case types.Number:
		_, ok := b.(types.Number)
		return ok
	case types.String:
		_, ok := b.(types.String)
		return ok
	case *types.Array:
		b, ok := b.(*types.Array)
		if !ok {
			return false
		}
		return unifiesArrays(a, b)
	case *types.Object:
		b, ok := b.(*types.Object)
		if !ok {
			return false
		}
		return unifiesObjects(a, b)
	case *types.Set:
		b, ok := b.(*types.Set)
		if !ok {
			return false
		}
		return unifies(types.Values(a), types.Values(b))
	case *types.Function:
		// TODO(tsandall): revisit once functions become first-class values.
		return false
	default:
		panic("unreachable")
	}
}
|
||||
|
||||
func unifiesAny(a types.Any, b types.Type) bool {
|
||||
if _, ok := b.(*types.Function); ok {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if unifies(a[i], b) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return len(a) == 0
|
||||
}
|
||||
|
||||
func unifiesArrays(a, b *types.Array) bool {
|
||||
|
||||
if !unifiesArraysStatic(a, b) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !unifiesArraysStatic(b, a) {
|
||||
return false
|
||||
}
|
||||
|
||||
return a.Dynamic() == nil || b.Dynamic() == nil || unifies(a.Dynamic(), b.Dynamic())
|
||||
}
|
||||
|
||||
func unifiesArraysStatic(a, b *types.Array) bool {
|
||||
if a.Len() != 0 {
|
||||
for i := 0; i < a.Len(); i++ {
|
||||
if !unifies(a.Select(i), b.Select(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// unifiesObjects reports whether two object types unify: static properties
// must unify in both directions and, when both declare dynamic properties,
// the dynamic value types must unify as well.
func unifiesObjects(a, b *types.Object) bool {
	if !unifiesObjectsStatic(a, b) {
		return false
	}

	if !unifiesObjectsStatic(b, a) {
		return false
	}

	return a.DynamicValue() == nil || b.DynamicValue() == nil || unifies(a.DynamicValue(), b.DynamicValue())
}

// unifiesObjectsStatic reports whether, for each static key of a, the value
// selected from a unifies with the value selected from b for the same key.
func unifiesObjectsStatic(a, b *types.Object) bool {
	for _, k := range a.Keys() {
		if !unifies(a.Select(k), b.Select(k)) {
			return false
		}
	}
	return true
}
|
||||
|
||||
// typeErrorCause defines an interface to determine the reason for a type
// error. The type error details implement this interface so that type checking
// can report more actionable errors.
type typeErrorCause interface {
	// nilType reports whether the error was caused by a nil (unknown) type.
	nilType() bool
}

// causedByNilType reports whether err carries details implementing
// typeErrorCause and those details indicate a nil type.
func causedByNilType(err *Error) bool {
	cause, ok := err.Details.(typeErrorCause)
	if !ok {
		return false
	}
	return cause.nilType()
}

// ArgErrDetail represents a generic argument error.
type ArgErrDetail struct {
	Have []types.Type `json:"have"` // actual argument types
	Want []types.Type `json:"want"` // expected argument types
}

// Lines returns the string representation of the detail.
func (d *ArgErrDetail) Lines() []string {
	lines := make([]string, 2)
	lines[0] = fmt.Sprint("have: ", formatArgs(d.Have))
	lines[1] = fmt.Sprint("want: ", formatArgs(d.Want))
	return lines
}

// nilType reports whether any of the actual argument types is nil (unknown).
func (d *ArgErrDetail) nilType() bool {
	for i := range d.Have {
		if types.Nil(d.Have[i]) {
			return true
		}
	}
	return false
}
|
||||
|
||||
// UnificationErrDetail describes a type mismatch error when two values are
// unified (e.g., x = [1,2,y]).
type UnificationErrDetail struct {
	Left  types.Type `json:"a"` // type of the left-hand operand
	Right types.Type `json:"b"` // type of the right-hand operand
}

// nilType reports whether either side of the unification has a nil (unknown)
// type.
func (a *UnificationErrDetail) nilType() bool {
	return types.Nil(a.Left) || types.Nil(a.Right)
}

// Lines returns the string representation of the detail.
func (a *UnificationErrDetail) Lines() []string {
	lines := make([]string, 2)
	lines[0] = fmt.Sprint("left : ", types.Sprint(a.Left))
	lines[1] = fmt.Sprint("right : ", types.Sprint(a.Right))
	return lines
}
|
||||
|
||||
// RefErrUnsupportedDetail describes an undefined reference error where the
// referenced value does not support dereferencing (e.g., scalars).
type RefErrUnsupportedDetail struct {
	Ref  Ref        `json:"ref"`  // invalid ref
	Pos  int        `json:"pos"`  // invalid element
	Have types.Type `json:"have"` // referenced type
}

// Lines returns the string representation of the detail: the ref, a caret
// line underlining the offending prefix, and the unsupported type.
func (r *RefErrUnsupportedDetail) Lines() []string {
	lines := []string{
		r.Ref.String(),
		// Underline everything up to and including the invalid element.
		strings.Repeat("^", len(r.Ref[:r.Pos+1].String())),
		fmt.Sprintf("have: %v", r.Have),
	}
	return lines
}
|
||||
|
||||
// RefErrInvalidDetail describes an undefined reference error where the referenced
// value does not support the reference operand (e.g., missing object key,
// invalid key type, etc.)
type RefErrInvalidDetail struct {
	Ref   Ref        `json:"ref"`            // invalid ref
	Pos   int        `json:"pos"`            // invalid element
	Have  types.Type `json:"have,omitempty"` // type of invalid element (for var/ref elements)
	Want  types.Type `json:"want"`           // allowed type (for non-object values)
	OneOf []Value    `json:"oneOf"`          // allowed values (e.g., for object keys)
}

// Lines returns the string representation of the detail: the ref, a caret
// positioned under the invalid element, then have/want descriptions.
func (r *RefErrInvalidDetail) Lines() []string {
	lines := []string{r.Ref.String()}

	// Place the caret one column past the valid prefix of the ref.
	offset := len(r.Ref[:r.Pos].String()) + 1
	pad := strings.Repeat(" ", offset)
	lines = append(lines, fmt.Sprintf("%s^", pad))

	// Prefer the element's type when known; otherwise show the element value.
	if r.Have != nil {
		lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have))
	} else {
		lines = append(lines, fmt.Sprintf("%shave: %v", pad, r.Ref[r.Pos]))
	}

	// Prefer an explicit list of allowed values when available.
	if len(r.OneOf) > 0 {
		lines = append(lines, fmt.Sprintf("%swant (one of): %v", pad, r.OneOf))
	} else {
		lines = append(lines, fmt.Sprintf("%swant (type): %v", pad, r.Want))
	}

	return lines
}
|
||||
|
||||
func formatArgs(args []types.Type) string {
|
||||
buf := make([]string, len(args))
|
||||
for i := range args {
|
||||
buf[i] = types.Sprint(args[i])
|
||||
}
|
||||
return "(" + strings.Join(buf, ", ") + ")"
|
||||
}
|
||||
|
||||
// newRefErrInvalid returns an undefined-ref error annotated with the invalid
// operand position, its actual/expected types, and optionally the allowed
// constant values.
func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, oneOf []Value) *Error {
	err := newRefError(loc, ref)
	err.Details = &RefErrInvalidDetail{
		Ref:   ref,
		Pos:   idx,
		Have:  have,
		Want:  want,
		OneOf: oneOf,
	}
	return err
}

// newRefErrUnsupported returns an undefined-ref error for a ref that
// dereferences a type that does not support dereferencing (e.g., a scalar).
func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error {
	err := newRefError(loc, ref)
	err.Details = &RefErrUnsupportedDetail{
		Ref:  ref,
		Pos:  idx,
		Have: have,
	}
	return err
}

// newRefError returns a bare undefined-ref type error for ref at loc.
func newRefError(loc *Location, ref Ref) *Error {
	return NewError(TypeErr, loc, "undefined ref: %v", ref)
}

// newArgError returns a type error for a built-in call whose argument types
// do not match the built-in's signature.
func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type, want []types.Type) *Error {
	err := NewError(TypeErr, loc, "%v: %v", builtinName, msg)
	err.Details = &ArgErrDetail{
		Have: have,
		Want: want,
	}
	return err
}
|
||||
|
||||
// getOneOfForNode returns the sorted set of child keys under node. Used to
// suggest allowed values in ref error details.
func getOneOfForNode(node *typeTreeNode) (result []Value) {
	node.Children().Iter(func(k, _ util.T) bool {
		result = append(result, k.(Value))
		return false
	})

	sortValueSlice(result)
	return result
}

// getOneOfForType returns the sorted set of static keys declared by tpe.
// Only object types contribute keys; other types yield an empty result.
func getOneOfForType(tpe types.Type) (result []Value) {
	switch tpe := tpe.(type) {
	case *types.Object:
		for _, k := range tpe.Keys() {
			v, err := InterfaceToValue(k)
			if err != nil {
				// Static keys are JSON-able by construction; failure here is
				// a programming error.
				panic(err)
			}
			result = append(result, v)
		}
	}
	sortValueSlice(result)
	return result
}
|
||||
|
||||
func sortValueSlice(sl []Value) {
|
||||
sort.Slice(sl, func(i, j int) bool {
|
||||
return sl[i].Compare(sl[j]) < 0
|
||||
})
|
||||
}
|
||||
|
||||
func getArgTypes(env *TypeEnv, args []*Term) []types.Type {
|
||||
pre := make([]types.Type, len(args))
|
||||
for i := range args {
|
||||
pre[i] = env.Get(args[i])
|
||||
}
|
||||
return pre
|
||||
}
|
||||
327
vendor/github.com/open-policy-agent/opa/ast/compare.go
generated
vendored
Normal file
327
vendor/github.com/open-policy-agent/opa/ast/compare.go
generated
vendored
Normal file
@@ -0,0 +1,327 @@
|
||||
// Copyright 2016 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// Compare returns an integer indicating whether two AST values are less than,
|
||||
// equal to, or greater than each other.
|
||||
//
|
||||
// If a is less than b, the return value is negative. If a is greater than b,
|
||||
// the return value is positive. If a is equal to b, the return value is zero.
|
||||
//
|
||||
// Different types are never equal to each other. For comparison purposes, types
|
||||
// are sorted as follows:
|
||||
//
|
||||
// nil < Null < Boolean < Number < String < Var < Ref < Array < Object < Set <
|
||||
// ArrayComprehension < ObjectComprehension < SetComprehension < Expr < SomeDecl
|
||||
// < With < Body < Rule < Import < Package < Module.
|
||||
//
|
||||
// Arrays and Refs are equal iff both a and b have the same length and all
|
||||
// corresponding elements are equal. If one element is not equal, the return
|
||||
// value is the same as for the first differing element. If all elements are
|
||||
// equal but a and b have different lengths, the shorter is considered less than
|
||||
// the other.
|
||||
//
|
||||
// Objects are considered equal iff both a and b have the same sorted (key,
|
||||
// value) pairs and are of the same length. Other comparisons are consistent but
|
||||
// not defined.
|
||||
//
|
||||
// Sets are considered equal iff the symmetric difference of a and b is empty.
|
||||
// Other comparisons are consistent but not defined.
|
||||
func Compare(a, b interface{}) int {

	// Unwrap terms to their underlying values; a nil *Term compares as nil.
	if t, ok := a.(*Term); ok {
		if t == nil {
			a = nil
		} else {
			a = t.Value
		}
	}

	if t, ok := b.(*Term); ok {
		if t == nil {
			b = nil
		} else {
			b = t.Value
		}
	}

	// nil sorts before every other value.
	if a == nil {
		if b == nil {
			return 0
		}
		return -1
	}
	if b == nil {
		return 1
	}

	// Values of different types compare by their type's sort order.
	sortA := sortOrder(a)
	sortB := sortOrder(b)

	if sortA < sortB {
		return -1
	} else if sortB < sortA {
		return 1
	}

	// Same sort order implies same dynamic type; the assertions below are
	// therefore safe.
	switch a := a.(type) {
	case Null:
		return 0
	case Boolean:
		b := b.(Boolean)
		if a.Equal(b) {
			return 0
		}
		if !a {
			return -1
		}
		return 1
	case Number:
		// Fast path: compare as int64 when both numbers parse exactly.
		if ai, err := json.Number(a).Int64(); err == nil {
			if bi, err := json.Number(b.(Number)).Int64(); err == nil {
				if ai == bi {
					return 0
				}
				if ai < bi {
					return -1
				}
				return 1
			}
		}

		// Slow path: arbitrary-precision comparison.
		bigA, ok := new(big.Float).SetString(string(a))
		if !ok {
			panic("illegal value")
		}
		bigB, ok := new(big.Float).SetString(string(b.(Number)))
		if !ok {
			panic("illegal value")
		}
		return bigA.Cmp(bigB)
	case String:
		b := b.(String)
		if a.Equal(b) {
			return 0
		}
		if a < b {
			return -1
		}
		return 1
	case Var:
		b := b.(Var)
		if a.Equal(b) {
			return 0
		}
		if a < b {
			return -1
		}
		return 1
	case Ref:
		b := b.(Ref)
		return termSliceCompare(a, b)
	case Array:
		b := b.(Array)
		return termSliceCompare(a, b)
	case Object:
		b := b.(Object)
		return a.Compare(b)
	case Set:
		b := b.(Set)
		return a.Compare(b)
	case *ArrayComprehension:
		b := b.(*ArrayComprehension)
		if cmp := Compare(a.Term, b.Term); cmp != 0 {
			return cmp
		}
		return Compare(a.Body, b.Body)
	case *ObjectComprehension:
		b := b.(*ObjectComprehension)
		if cmp := Compare(a.Key, b.Key); cmp != 0 {
			return cmp
		}
		if cmp := Compare(a.Value, b.Value); cmp != 0 {
			return cmp
		}
		return Compare(a.Body, b.Body)
	case *SetComprehension:
		b := b.(*SetComprehension)
		if cmp := Compare(a.Term, b.Term); cmp != 0 {
			return cmp
		}
		return Compare(a.Body, b.Body)
	case Call:
		b := b.(Call)
		return termSliceCompare(a, b)
	case *Expr:
		b := b.(*Expr)
		return a.Compare(b)
	case *SomeDecl:
		b := b.(*SomeDecl)
		return a.Compare(b)
	case *With:
		b := b.(*With)
		return a.Compare(b)
	case Body:
		b := b.(Body)
		return a.Compare(b)
	case *Head:
		b := b.(*Head)
		return a.Compare(b)
	case *Rule:
		b := b.(*Rule)
		return a.Compare(b)
	case Args:
		b := b.(Args)
		return termSliceCompare(a, b)
	case *Import:
		b := b.(*Import)
		return a.Compare(b)
	case *Package:
		b := b.(*Package)
		return a.Compare(b)
	case *Module:
		b := b.(*Module)
		return a.Compare(b)
	}
	panic(fmt.Sprintf("illegal value: %T", a))
}
|
||||
|
||||
// termSlice implements sort.Interface for slices of terms, ordering elements
// by term value via Compare.
type termSlice []*Term

func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 }
func (s termSlice) Swap(i, j int)      { x := s[i]; s[i] = s[j]; s[j] = x }
func (s termSlice) Len() int           { return len(s) }
||||
|
||||
// sortOrder returns the relative position of x's type in the total order used
// by Compare. Panics on unsupported types.
func sortOrder(x interface{}) int {
	switch x.(type) {
	case Null:
		return 0
	case Boolean:
		return 1
	case Number:
		return 2
	case String:
		return 3
	case Var:
		return 4
	case Ref:
		return 5
	case Array:
		return 6
	case Object:
		return 7
	case Set:
		return 8
	case *ArrayComprehension:
		return 9
	case *ObjectComprehension:
		return 10
	case *SetComprehension:
		return 11
	case Call:
		return 12
	case Args:
		return 13
	case *Expr:
		return 100
	case *SomeDecl:
		return 101
	case *With:
		return 110
	case *Head:
		return 120
	case Body:
		return 200
	case *Rule:
		return 1000
	case *Import:
		return 1001
	case *Package:
		return 1002
	case *Module:
		return 10000
	}
	panic(fmt.Sprintf("illegal value: %T", x))
}
|
||||
|
||||
func importsCompare(a, b []*Import) int {
|
||||
minLen := len(a)
|
||||
if len(b) < minLen {
|
||||
minLen = len(b)
|
||||
}
|
||||
for i := 0; i < minLen; i++ {
|
||||
if cmp := a[i].Compare(b[i]); cmp != 0 {
|
||||
return cmp
|
||||
}
|
||||
}
|
||||
if len(a) < len(b) {
|
||||
return -1
|
||||
}
|
||||
if len(b) < len(a) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func rulesCompare(a, b []*Rule) int {
|
||||
minLen := len(a)
|
||||
if len(b) < minLen {
|
||||
minLen = len(b)
|
||||
}
|
||||
for i := 0; i < minLen; i++ {
|
||||
if cmp := a[i].Compare(b[i]); cmp != 0 {
|
||||
return cmp
|
||||
}
|
||||
}
|
||||
if len(a) < len(b) {
|
||||
return -1
|
||||
}
|
||||
if len(b) < len(a) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func termSliceCompare(a, b []*Term) int {
|
||||
minLen := len(a)
|
||||
if len(b) < minLen {
|
||||
minLen = len(b)
|
||||
}
|
||||
for i := 0; i < minLen; i++ {
|
||||
if cmp := Compare(a[i], b[i]); cmp != 0 {
|
||||
return cmp
|
||||
}
|
||||
}
|
||||
if len(a) < len(b) {
|
||||
return -1
|
||||
} else if len(b) < len(a) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func withSliceCompare(a, b []*With) int {
|
||||
minLen := len(a)
|
||||
if len(b) < minLen {
|
||||
minLen = len(b)
|
||||
}
|
||||
for i := 0; i < minLen; i++ {
|
||||
if cmp := Compare(a[i], b[i]); cmp != 0 {
|
||||
return cmp
|
||||
}
|
||||
}
|
||||
if len(a) < len(b) {
|
||||
return -1
|
||||
} else if len(b) < len(a) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
3419
vendor/github.com/open-policy-agent/opa/ast/compile.go
generated
vendored
Normal file
3419
vendor/github.com/open-policy-agent/opa/ast/compile.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
42
vendor/github.com/open-policy-agent/opa/ast/compilehelper.go
generated
vendored
Normal file
42
vendor/github.com/open-policy-agent/opa/ast/compilehelper.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
// Copyright 2016 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ast
|
||||
|
||||
// CompileModules takes a set of Rego modules represented as strings and
|
||||
// compiles them for evaluation. The keys of the map are used as filenames.
|
||||
func CompileModules(modules map[string]string) (*Compiler, error) {
|
||||
|
||||
parsed := make(map[string]*Module, len(modules))
|
||||
|
||||
for f, module := range modules {
|
||||
var pm *Module
|
||||
var err error
|
||||
if pm, err = ParseModule(f, module); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parsed[f] = pm
|
||||
}
|
||||
|
||||
compiler := NewCompiler()
|
||||
compiler.Compile(parsed)
|
||||
|
||||
if compiler.Failed() {
|
||||
return nil, compiler.Errors
|
||||
}
|
||||
|
||||
return compiler, nil
|
||||
}
|
||||
|
||||
// MustCompileModules compiles a set of Rego modules represented as strings. If
|
||||
// the compilation process fails, this function panics.
|
||||
func MustCompileModules(modules map[string]string) *Compiler {
|
||||
|
||||
compiler, err := CompileModules(modules)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return compiler
|
||||
}
|
||||
48
vendor/github.com/open-policy-agent/opa/ast/conflicts.go
generated
vendored
Normal file
48
vendor/github.com/open-policy-agent/opa/ast/conflicts.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
// Copyright 2019 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CheckPathConflicts returns a set of errors indicating paths that
// are in conflict with the result of the provided callable.
func CheckPathConflicts(c *Compiler, exists func([]string) (bool, error)) Errors {
	var errs Errors

	// Only rules under the default root document can conflict with existing
	// data paths.
	root := c.RuleTree.Child(DefaultRootDocument.Value)
	if root == nil {
		return nil
	}

	for _, node := range root.Children {
		errs = append(errs, checkDocumentConflicts(node, exists, nil)...)
	}

	return errs
}

// checkDocumentConflicts recursively walks the rule tree under node,
// accumulating the data path, and reports a conflict for any rule-bearing
// node whose path already exists according to the exists callback.
func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), path []string) Errors {

	path = append(path, string(node.Key.(String)))

	if len(node.Values) > 0 {
		s := strings.Join(path, "/")
		if ok, err := exists(path); err != nil {
			return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflict check for data path %v: %v", s, err.Error())}
		} else if ok {
			return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflicting rule for data path %v found", s)}
		}
	}

	var errs Errors

	for _, child := range node.Children {
		errs = append(errs, checkDocumentConflicts(child, exists, path)...)
	}

	return errs
}
|
||||
36
vendor/github.com/open-policy-agent/opa/ast/doc.go
generated
vendored
Normal file
36
vendor/github.com/open-policy-agent/opa/ast/doc.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
// Copyright 2016 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ast declares Rego syntax tree types and also includes a parser and compiler for preparing policies for execution in the policy engine.
|
||||
//
|
||||
// Rego policies are defined using a relatively small set of types: modules, package and import declarations, rules, expressions, and terms. At their core, policies consist of rules that are defined by one or more expressions over documents available to the policy engine. The expressions are defined by intrinsic values (terms) such as strings, objects, variables, etc.
|
||||
//
|
||||
// Rego policies are typically defined in text files and then parsed and compiled by the policy engine at runtime. The parsing stage takes the text or string representation of the policy and converts it into an abstract syntax tree (AST) that consists of the types mentioned above. The AST is organized as follows:
|
||||
//
|
||||
// Module
|
||||
// |
|
||||
// +--- Package (Reference)
|
||||
// |
|
||||
// +--- Imports
|
||||
// | |
|
||||
// | +--- Import (Term)
|
||||
// |
|
||||
// +--- Rules
|
||||
// |
|
||||
// +--- Rule
|
||||
// |
|
||||
// +--- Head
|
||||
// | |
|
||||
// | +--- Name (Variable)
|
||||
// | |
|
||||
// | +--- Key (Term)
|
||||
// | |
|
||||
// | +--- Value (Term)
|
||||
// |
|
||||
// +--- Body
|
||||
// |
|
||||
// +--- Expression (Term | Terms | Variable Declaration)
|
||||
//
|
||||
// At query time, the policy engine expects policies to have been compiled. The compilation stage takes one or more modules and compiles them into a format that the policy engine supports.
|
||||
package ast
|
||||
323
vendor/github.com/open-policy-agent/opa/ast/env.go
generated
vendored
Normal file
323
vendor/github.com/open-policy-agent/opa/ast/env.go
generated
vendored
Normal file
@@ -0,0 +1,323 @@
|
||||
// Copyright 2017 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
"github.com/open-policy-agent/opa/types"
|
||||
"github.com/open-policy-agent/opa/util"
|
||||
)
|
||||
|
||||
// TypeEnv contains type info for static analysis such as type checking.
type TypeEnv struct {
	tree *typeTreeNode // type bindings for vars and refs in this scope
	next *TypeEnv      // enclosing (outer) environment, if any
}

// NewTypeEnv returns an empty TypeEnv.
func NewTypeEnv() *TypeEnv {
	return &TypeEnv{
		tree: newTypeTree(),
	}
}
|
||||
|
||||
// Get returns the type of x. Terms are unwrapped to their values; scalars map
// to their corresponding types; composites and comprehensions are typed
// recursively; vars and refs are resolved against the environment chain.
func (env *TypeEnv) Get(x interface{}) types.Type {

	if term, ok := x.(*Term); ok {
		x = term.Value
	}

	switch x := x.(type) {

	// Scalars.
	case Null:
		return types.NewNull()
	case Boolean:
		return types.NewBoolean()
	case Number:
		return types.NewNumber()
	case String:
		return types.NewString()

	// Composites.
	case Array:
		static := make([]types.Type, len(x))
		for i := range static {
			tpe := env.Get(x[i].Value)
			static[i] = tpe
		}

		// An empty array carries no static info; its elements are unknown.
		var dynamic types.Type
		if len(static) == 0 {
			dynamic = types.A
		}

		return types.NewArray(static, dynamic)

	case Object:
		static := []*types.StaticProperty{}
		var dynamic *types.DynamicProperty

		x.Foreach(func(k, v *Term) {
			if IsConstant(k.Value) {
				kjson, err := JSON(k.Value)
				if err == nil {
					tpe := env.Get(v)
					static = append(static, types.NewStaticProperty(kjson, tpe))
					return
				}
			}
			// Can't handle it as a static property, fallback to dynamic
			typeK := env.Get(k.Value)
			typeV := env.Get(v.Value)
			dynamic = types.NewDynamicProperty(typeK, typeV)
		})

		if len(static) == 0 && dynamic == nil {
			dynamic = types.NewDynamicProperty(types.A, types.A)
		}

		return types.NewObject(static, dynamic)

	case Set:
		// The element type is the union of all member types.
		var tpe types.Type
		x.Foreach(func(elem *Term) {
			other := env.Get(elem.Value)
			tpe = types.Or(tpe, other)
		})
		if tpe == nil {
			tpe = types.A
		}
		return types.NewSet(tpe)

	// Comprehensions. Bodies are checked in a child scope; nil is returned
	// when checking fails (errors are reported by the main check pass).
	case *ArrayComprehension:
		checker := newTypeChecker()
		cpy, errs := checker.CheckBody(env, x.Body)
		if len(errs) == 0 {
			return types.NewArray(nil, cpy.Get(x.Term))
		}
		return nil
	case *ObjectComprehension:
		checker := newTypeChecker()
		cpy, errs := checker.CheckBody(env, x.Body)
		if len(errs) == 0 {
			return types.NewObject(nil, types.NewDynamicProperty(cpy.Get(x.Key), cpy.Get(x.Value)))
		}
		return nil
	case *SetComprehension:
		checker := newTypeChecker()
		cpy, errs := checker.CheckBody(env, x.Body)
		if len(errs) == 0 {
			return types.NewSet(cpy.Get(x.Term))
		}
		return nil

	// Refs.
	case Ref:
		return env.getRef(x)

	// Vars resolve in this scope first, then the enclosing chain.
	case Var:
		if node := env.tree.Child(x); node != nil {
			return node.Value()
		}
		if env.next != nil {
			return env.next.Get(x)
		}
		return nil

	default:
		panic("unreachable")
	}
}
|
||||
|
||||
// getRef resolves the type of ref against this environment's type tree,
// falling back to outer environments / root documents when unknown.
func (env *TypeEnv) getRef(ref Ref) types.Type {

	node := env.tree.Child(ref[0].Value)
	if node == nil {
		return env.getRefFallback(ref)
	}

	return env.getRefRec(node, ref, ref[1:])
}

// getRefFallback handles refs with no local type info: defer to the outer
// environment if any; otherwise refs rooted at a root document are unknown
// (any), and everything else has no type.
func (env *TypeEnv) getRefFallback(ref Ref) types.Type {

	if env.next != nil {
		return env.next.Get(ref)
	}

	if RootDocumentNames.Contains(ref[0]) {
		return types.A
	}

	return nil
}

// getRefRec descends the type tree along tail. Leaf nodes and non-constant
// operands switch from tree traversal to selecting into the computed type.
func (env *TypeEnv) getRefRec(node *typeTreeNode, ref, tail Ref) types.Type {
	if len(tail) == 0 {
		return env.getRefRecExtent(node)
	}

	if node.Leaf() {
		return selectRef(node.Value(), tail)
	}

	if !IsConstant(tail[0].Value) {
		return selectRef(env.getRefRecExtent(node), tail)
	}

	child := node.Child(tail[0].Value)
	if child == nil {
		return env.getRefFallback(ref)
	}

	return env.getRefRec(child, ref, tail[1:])
}

// getRefRecExtent computes an object type covering the full extent of the
// subtree rooted at node.
func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type {

	if node.Leaf() {
		return node.Value()
	}

	children := []*types.StaticProperty{}

	node.Children().Iter(func(k, v util.T) bool {
		key := k.(Value)
		child := v.(*typeTreeNode)

		tpe := env.getRefRecExtent(child)
		// TODO(tsandall): handle non-string keys?
		if s, ok := key.(String); ok {
			children = append(children, types.NewStaticProperty(string(s), tpe))
		}
		return false
	})

	// TODO(tsandall): for now, these objects can have any dynamic properties
	// because we don't have schema for base docs. Once schemas are supported
	// we can improve this.
	return types.NewObject(children, types.NewDynamicProperty(types.S, types.A))
}

// wrap returns a fresh child environment that chains to env for lookups.
func (env *TypeEnv) wrap() *TypeEnv {
	cpy := *env
	cpy.next = env
	cpy.tree = newTypeTree()
	return &cpy
}
|
||||
|
||||
// typeTreeNode is used to store type information in a tree.
type typeTreeNode struct {
	key      Value         // key of this node within its parent
	value    types.Type    // type stored at this node (nil for interior nodes)
	children *util.HashMap // Value -> *typeTreeNode
}

// newTypeTree returns an empty type tree root.
func newTypeTree() *typeTreeNode {
	return &typeTreeNode{
		key:      nil,
		value:    nil,
		children: util.NewHashMap(valueEq, valueHash),
	}
}

// Child returns the child node for key, or nil if none exists.
func (n *typeTreeNode) Child(key Value) *typeTreeNode {
	value, ok := n.children.Get(key)
	if !ok {
		return nil
	}
	return value.(*typeTreeNode)
}

// Children returns the map of child nodes keyed by Value.
func (n *typeTreeNode) Children() *util.HashMap {
	return n.children
}

// Get returns the type stored at path, or nil if the path does not exist.
func (n *typeTreeNode) Get(path Ref) types.Type {
	curr := n
	for _, term := range path {
		child, ok := curr.children.Get(term.Value)
		if !ok {
			return nil
		}
		curr = child.(*typeTreeNode)
	}
	return curr.Value()
}

// Leaf reports whether a type is stored at this node.
func (n *typeTreeNode) Leaf() bool {
	return n.value != nil
}

// PutOne stores tpe on the immediate child for key, creating the child if
// necessary.
func (n *typeTreeNode) PutOne(key Value, tpe types.Type) {
	c, ok := n.children.Get(key)

	var child *typeTreeNode
	if !ok {
		child = newTypeTree()
		child.key = key
		n.children.Put(key, child)
	} else {
		child = c.(*typeTreeNode)
	}

	child.value = tpe
}

// Put stores tpe at path, creating intermediate nodes as needed.
func (n *typeTreeNode) Put(path Ref, tpe types.Type) {
	curr := n
	for _, term := range path {
		c, ok := curr.children.Get(term.Value)

		var child *typeTreeNode
		if !ok {
			child = newTypeTree()
			child.key = term.Value
			curr.children.Put(child.key, child)
		} else {
			child = c.(*typeTreeNode)
		}

		curr = child
	}
	curr.value = tpe
}

// Value returns the type stored at this node (nil for interior nodes).
func (n *typeTreeNode) Value() types.Type {
	return n.value
}
|
||||
|
||||
// selectConstant returns the attribute of the type referred to by the term. If
// the attribute type cannot be determined, nil is returned.
func selectConstant(tpe types.Type, term *Term) types.Type {
	x, err := JSON(term.Value)
	if err == nil {
		return types.Select(tpe, x)
	}
	return nil
}

// selectRef returns the type of the nested attribute referred to by ref. If
// the attribute type cannot be determined, nil is returned. If the ref
// contains vars or refs, then the returned type will be a union of the
// possible types.
func selectRef(tpe types.Type, ref Ref) types.Type {

	if tpe == nil || len(ref) == 0 {
		return tpe
	}

	head, tail := ref[0], ref[1:]

	switch head.Value.(type) {
	case Var, Ref, Array, Object, Set:
		// Non-constant operand: any attribute could be selected.
		return selectRef(types.Values(tpe), tail)
	default:
		return selectRef(selectConstant(tpe, head), tail)
	}
}
|
||||
133
vendor/github.com/open-policy-agent/opa/ast/errors.go
generated
vendored
Normal file
133
vendor/github.com/open-policy-agent/opa/ast/errors.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
// Copyright 2016 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Errors represents a series of errors encountered during parsing, compiling,
|
||||
// etc.
|
||||
type Errors []*Error
|
||||
|
||||
func (e Errors) Error() string {
|
||||
|
||||
if len(e) == 0 {
|
||||
return "no error(s)"
|
||||
}
|
||||
|
||||
if len(e) == 1 {
|
||||
return fmt.Sprintf("1 error occurred: %v", e[0].Error())
|
||||
}
|
||||
|
||||
s := []string{}
|
||||
for _, err := range e {
|
||||
s = append(s, err.Error())
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n"))
|
||||
}
|
||||
|
||||
// Sort sorts the error slice by location. If the locations are equal then the
// error message is compared.
func (e Errors) Sort() {
	sort.Slice(e, func(i, j int) bool {
		a := e[i]
		b := e[j]

		if cmp := a.Location.Compare(b.Location); cmp != 0 {
			return cmp < 0
		}

		// Locations are equal: fall back to the rendered message so the
		// ordering is deterministic.
		return a.Error() < b.Error()
	})
}
|
||||
|
||||
const (
|
||||
// ParseErr indicates an unclassified parse error occurred.
|
||||
ParseErr = "rego_parse_error"
|
||||
|
||||
// CompileErr indicates an unclassified compile error occurred.
|
||||
CompileErr = "rego_compile_error"
|
||||
|
||||
// TypeErr indicates a type error was caught.
|
||||
TypeErr = "rego_type_error"
|
||||
|
||||
// UnsafeVarErr indicates an unsafe variable was found during compilation.
|
||||
UnsafeVarErr = "rego_unsafe_var_error"
|
||||
|
||||
// RecursionErr indicates recursion was found during compilation.
|
||||
RecursionErr = "rego_recursion_error"
|
||||
)
|
||||
|
||||
// IsError returns true if err is an AST error with code.
|
||||
func IsError(code string, err error) bool {
|
||||
if err, ok := err.(*Error); ok {
|
||||
return err.Code == code
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ErrorDetails defines the interface for detailed error messages.
|
||||
type ErrorDetails interface {
|
||||
Lines() []string
|
||||
}
|
||||
|
||||
// Error represents a single error caught during parsing, compiling, etc.
|
||||
type Error struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Location *Location `json:"location,omitempty"`
|
||||
Details ErrorDetails `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
|
||||
var prefix string
|
||||
|
||||
if e.Location != nil {
|
||||
|
||||
if len(e.Location.File) > 0 {
|
||||
prefix += e.Location.File + ":" + fmt.Sprint(e.Location.Row)
|
||||
} else {
|
||||
prefix += fmt.Sprint(e.Location.Row) + ":" + fmt.Sprint(e.Location.Col)
|
||||
}
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("%v: %v", e.Code, e.Message)
|
||||
|
||||
if len(prefix) > 0 {
|
||||
msg = prefix + ": " + msg
|
||||
}
|
||||
|
||||
if e.Details != nil {
|
||||
for _, line := range e.Details.Lines() {
|
||||
msg += "\n\t" + line
|
||||
}
|
||||
}
|
||||
|
||||
return msg
|
||||
}
|
||||
|
||||
// NewError returns a new Error object with the given code and location; the
// message is built from the format string f and arguments a.
func NewError(code string, loc *Location, f string, a ...interface{}) *Error {
	return &Error{
		Code:     code,
		Location: loc,
		Message:  fmt.Sprintf(f, a...),
	}
}
|
||||
|
||||
var (
|
||||
errPartialRuleAssignOperator = fmt.Errorf("partial rules must use = operator (not := operator)")
|
||||
errElseAssignOperator = fmt.Errorf("else keyword cannot be used on rule declared with := operator")
|
||||
errFunctionAssignOperator = fmt.Errorf("functions must use = operator (not := operator)")
|
||||
)
|
||||
|
||||
// errTermAssignOperator returns an error indicating that the value x (shown
// via its AST type name) cannot appear on the left-hand side of the :=
// operator.
func errTermAssignOperator(x interface{}) error {
	return fmt.Errorf("cannot assign to %v", TypeName(x))
}
|
||||
28
vendor/github.com/open-policy-agent/opa/ast/fuzz.go
generated
vendored
Normal file
28
vendor/github.com/open-policy-agent/opa/ast/fuzz.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
// +build gofuzz

package ast

import (
	"regexp"
)

// nested { and [ tokens cause the parse time to explode.
// see: https://github.com/mna/pigeon/issues/75
var blacklistRegexp = regexp.MustCompile(`[{(\[]{5,}`)

// Fuzz is the entry point used by go-fuzz. It parses the input and, when
// parsing succeeds, also runs the compiler over it. Return values follow the
// go-fuzz convention: -1 drops the input from the corpus, 1 marks it as
// interesting, 0 is neutral.
func Fuzz(data []byte) int {

	// Skip inputs known to make the generated parser pathologically slow.
	if blacklistRegexp.Match(data) {
		return -1
	}

	str := string(data)
	_, _, err := ParseStatements("", str)

	if err == nil {
		// Parse succeeded: exercise the compiler too. The result is
		// intentionally ignored; the fuzzer only cares about crashes/hangs.
		CompileModules(map[string]string{"": str})
		return 1
	}

	return 0
}
|
||||
798
vendor/github.com/open-policy-agent/opa/ast/index.go
generated
vendored
Normal file
798
vendor/github.com/open-policy-agent/opa/ast/index.go
generated
vendored
Normal file
@@ -0,0 +1,798 @@
|
||||
// Copyright 2017 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/open-policy-agent/opa/util"
|
||||
)
|
||||
|
||||
// RuleIndex defines the interface for rule indices.
|
||||
type RuleIndex interface {
|
||||
|
||||
// Build tries to construct an index for the given rules. If the index was
|
||||
// constructed, ok is true, otherwise false.
|
||||
Build(rules []*Rule) (ok bool)
|
||||
|
||||
// Lookup searches the index for rules that will match the provided
|
||||
// resolver. If the resolver returns an error, it is returned via err.
|
||||
Lookup(resolver ValueResolver) (result *IndexResult, err error)
|
||||
|
||||
// AllRules traverses the index and returns all rules that will match
|
||||
// the provided resolver without any optimizations (effectively with
|
||||
// indexing disabled). If the resolver returns an error, it is returned
|
||||
// via err.
|
||||
AllRules(resolver ValueResolver) (result *IndexResult, err error)
|
||||
}
|
||||
|
||||
// IndexResult contains the result of an index lookup.
|
||||
type IndexResult struct {
|
||||
Kind DocKind
|
||||
Rules []*Rule
|
||||
Else map[*Rule][]*Rule
|
||||
Default *Rule
|
||||
}
|
||||
|
||||
// NewIndexResult returns a new IndexResult object.
|
||||
func NewIndexResult(kind DocKind) *IndexResult {
|
||||
return &IndexResult{
|
||||
Kind: kind,
|
||||
Else: map[*Rule][]*Rule{},
|
||||
}
|
||||
}
|
||||
|
||||
// Empty returns true if there are no rules to evaluate.
|
||||
func (ir *IndexResult) Empty() bool {
|
||||
return len(ir.Rules) == 0 && ir.Default == nil
|
||||
}
|
||||
|
||||
type baseDocEqIndex struct {
|
||||
isVirtual func(Ref) bool
|
||||
root *trieNode
|
||||
defaultRule *Rule
|
||||
kind DocKind
|
||||
}
|
||||
|
||||
func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex {
|
||||
return &baseDocEqIndex{
|
||||
isVirtual: isVirtual,
|
||||
root: newTrieNodeImpl(),
|
||||
}
|
||||
}
|
||||
|
||||
// Build tries to construct an index for the given rules. It returns true if
// an index was constructed (i.e., rules was non-empty). Build runs two
// passes: the first collects indexable expressions per rule, the second
// inserts each rule into the trie along the references sorted by frequency.
func (i *baseDocEqIndex) Build(rules []*Rule) bool {
	if len(rules) == 0 {
		return false
	}

	i.kind = rules[0].Head.DocKind()
	indices := newrefindices(i.isVirtual)

	// build indices for each rule.
	for idx := range rules {
		WalkRules(rules[idx], func(rule *Rule) bool {
			if rule.Default {
				// Default rules are not indexed; they apply when no other
				// rule matches.
				i.defaultRule = rule
				return false
			}
			for _, expr := range rule.Body {
				indices.Update(rule, expr)
			}
			return false
		})
	}

	// build trie out of indices.
	for idx := range rules {
		var prio int
		WalkRules(rules[idx], func(rule *Rule) bool {
			if rule.Default {
				return false
			}
			node := i.root
			if indices.Indexed(rule) {
				for _, ref := range indices.Sorted() {
					node = node.Insert(ref, indices.Value(rule, ref), indices.Mapper(rule, ref))
				}
			}
			// Insert rule into trie with (insertion order, priority order)
			// tuple. Retaining the insertion order allows us to return rules
			// in the order they were passed to this function.
			node.rules = append(node.rules, &ruleNode{[...]int{idx, prio}, rule})
			prio++
			return false
		})

	}

	return true
}
|
||||
|
||||
// Lookup traverses the index trie using values obtained from resolver and
// returns the rules that may match, grouped with their else chains and the
// default rule (if any). Resolver errors are returned unchanged.
func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) {

	tr := newTrieTraversalResult()

	err := i.root.Traverse(resolver, tr)
	if err != nil {
		return nil, err
	}

	result := NewIndexResult(i.kind)
	result.Default = i.defaultRule
	result.Rules = make([]*Rule, 0, len(tr.ordering))

	for _, pos := range tr.ordering {
		// Order each bucket by rule priority; the first node is the root of
		// the else chain and the remainder are its else rules.
		sort.Slice(tr.unordered[pos], func(i, j int) bool {
			return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
		})
		nodes := tr.unordered[pos]
		root := nodes[0].rule
		result.Rules = append(result.Rules, root)
		if len(nodes) > 1 {
			result.Else[root] = make([]*Rule, len(nodes)-1)
			for i := 1; i < len(nodes); i++ {
				result.Else[root][i-1] = nodes[i].rule
			}
		}
	}

	return result, nil
}
|
||||
|
||||
// AllRules walks the entire trie (no resolver-driven pruning, i.e. with
// indexing effectively disabled) and returns every indexed rule, grouped
// with else chains and the default rule.
func (i *baseDocEqIndex) AllRules(resolver ValueResolver) (*IndexResult, error) {
	tr := newTrieTraversalResult()

	// Walk over the rule trie and accumulate _all_ rules
	rw := &ruleWalker{result: tr}
	i.root.Do(rw)

	result := NewIndexResult(i.kind)
	result.Default = i.defaultRule
	result.Rules = make([]*Rule, 0, len(tr.ordering))

	for _, pos := range tr.ordering {
		// Order each bucket by rule priority; index 0 is the root of the
		// else chain.
		sort.Slice(tr.unordered[pos], func(i, j int) bool {
			return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
		})
		nodes := tr.unordered[pos]
		root := nodes[0].rule
		result.Rules = append(result.Rules, root)
		if len(nodes) > 1 {
			result.Else[root] = make([]*Rule, len(nodes)-1)
			for i := 1; i < len(nodes); i++ {
				result.Else[root][i-1] = nodes[i].rule
			}
		}
	}

	return result, nil
}
|
||||
|
||||
type ruleWalker struct {
|
||||
result *trieTraversalResult
|
||||
}
|
||||
|
||||
func (r *ruleWalker) Do(x interface{}) trieWalker {
|
||||
tn := x.(*trieNode)
|
||||
for _, rn := range tn.rules {
|
||||
r.result.Add(rn)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
type valueMapper func(Value) Value
|
||||
|
||||
type refindex struct {
|
||||
Ref Ref
|
||||
Value Value
|
||||
Mapper func(Value) Value
|
||||
}
|
||||
|
||||
type refindices struct {
|
||||
isVirtual func(Ref) bool
|
||||
rules map[*Rule][]*refindex
|
||||
frequency *util.HashMap
|
||||
sorted []Ref
|
||||
}
|
||||
|
||||
func newrefindices(isVirtual func(Ref) bool) *refindices {
|
||||
return &refindices{
|
||||
isVirtual: isVirtual,
|
||||
rules: map[*Rule][]*refindex{},
|
||||
frequency: util.NewHashMap(func(a, b util.T) bool {
|
||||
r1, r2 := a.(Ref), b.(Ref)
|
||||
return r1.Equal(r2)
|
||||
}, func(x util.T) int {
|
||||
return x.(Ref).Hash()
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// Update attempts to update the refindices for the given expression in the
|
||||
// given rule. If the expression cannot be indexed the update does not affect
|
||||
// the indices.
|
||||
func (i *refindices) Update(rule *Rule, expr *Expr) {
|
||||
|
||||
if expr.Negated {
|
||||
return
|
||||
}
|
||||
|
||||
if len(expr.With) > 0 {
|
||||
// NOTE(tsandall): In the future, we may need to consider expressions
|
||||
// that have with statements applied to them.
|
||||
return
|
||||
}
|
||||
|
||||
op := expr.Operator()
|
||||
|
||||
if op.Equal(Equality.Ref()) || op.Equal(Equal.Ref()) {
|
||||
|
||||
i.updateEq(rule, expr)
|
||||
|
||||
} else if op.Equal(GlobMatch.Ref()) {
|
||||
|
||||
i.updateGlobMatch(rule, expr)
|
||||
}
|
||||
}
|
||||
|
||||
// Sorted returns a sorted list of references that the indices were built from.
|
||||
// References that appear more frequently in the indexed rules are ordered
|
||||
// before less frequently appearing references.
|
||||
func (i *refindices) Sorted() []Ref {
|
||||
|
||||
if i.sorted == nil {
|
||||
counts := make([]int, 0, i.frequency.Len())
|
||||
i.sorted = make([]Ref, 0, i.frequency.Len())
|
||||
|
||||
i.frequency.Iter(func(k, v util.T) bool {
|
||||
counts = append(counts, v.(int))
|
||||
i.sorted = append(i.sorted, k.(Ref))
|
||||
return false
|
||||
})
|
||||
|
||||
sort.Slice(i.sorted, func(i, j int) bool {
|
||||
return counts[i] > counts[j]
|
||||
})
|
||||
}
|
||||
|
||||
return i.sorted
|
||||
}
|
||||
|
||||
func (i *refindices) Indexed(rule *Rule) bool {
|
||||
return len(i.rules[rule]) > 0
|
||||
}
|
||||
|
||||
func (i *refindices) Value(rule *Rule, ref Ref) Value {
|
||||
if index := i.index(rule, ref); index != nil {
|
||||
return index.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *refindices) Mapper(rule *Rule, ref Ref) valueMapper {
|
||||
if index := i.index(rule, ref); index != nil {
|
||||
return index.Mapper
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *refindices) updateEq(rule *Rule, expr *Expr) {
|
||||
a, b := expr.Operand(0), expr.Operand(1)
|
||||
if ref, value, ok := eqOperandsToRefAndValue(i.isVirtual, a, b); ok {
|
||||
i.insert(rule, &refindex{
|
||||
Ref: ref,
|
||||
Value: value,
|
||||
})
|
||||
} else if ref, value, ok := eqOperandsToRefAndValue(i.isVirtual, b, a); ok {
|
||||
i.insert(rule, &refindex{
|
||||
Ref: ref,
|
||||
Value: value,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) {
|
||||
|
||||
delim, ok := globDelimiterToString(expr.Operand(1))
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if arr := globPatternToArray(expr.Operand(0), delim); arr != nil {
|
||||
// The 3rd operand of glob.match is the value to match. We assume the
|
||||
// 3rd operand was a reference that has been rewritten and bound to a
|
||||
// variable earlier in the query.
|
||||
match := expr.Operand(2)
|
||||
if _, ok := match.Value.(Var); ok {
|
||||
for _, other := range i.rules[rule] {
|
||||
if _, ok := other.Value.(Var); ok && other.Value.Compare(match.Value) == 0 {
|
||||
i.insert(rule, &refindex{
|
||||
Ref: other.Ref,
|
||||
Value: arr.Value,
|
||||
Mapper: func(v Value) Value {
|
||||
if s, ok := v.(String); ok {
|
||||
return stringSliceToArray(splitStringEscaped(string(s), delim))
|
||||
}
|
||||
return v
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (i *refindices) insert(rule *Rule, index *refindex) {
|
||||
|
||||
count, ok := i.frequency.Get(index.Ref)
|
||||
if !ok {
|
||||
count = 0
|
||||
}
|
||||
|
||||
i.frequency.Put(index.Ref, count.(int)+1)
|
||||
|
||||
for pos, other := range i.rules[rule] {
|
||||
if other.Ref.Equal(index.Ref) {
|
||||
i.rules[rule][pos] = index
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
i.rules[rule] = append(i.rules[rule], index)
|
||||
}
|
||||
|
||||
func (i *refindices) index(rule *Rule, ref Ref) *refindex {
|
||||
for _, index := range i.rules[rule] {
|
||||
if index.Ref.Equal(ref) {
|
||||
return index
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type trieWalker interface {
|
||||
Do(x interface{}) trieWalker
|
||||
}
|
||||
|
||||
type trieTraversalResult struct {
|
||||
unordered map[int][]*ruleNode
|
||||
ordering []int
|
||||
}
|
||||
|
||||
func newTrieTraversalResult() *trieTraversalResult {
|
||||
return &trieTraversalResult{
|
||||
unordered: map[int][]*ruleNode{},
|
||||
}
|
||||
}
|
||||
|
||||
func (tr *trieTraversalResult) Add(node *ruleNode) {
|
||||
root := node.prio[0]
|
||||
nodes, ok := tr.unordered[root]
|
||||
if !ok {
|
||||
tr.ordering = append(tr.ordering, root)
|
||||
}
|
||||
tr.unordered[root] = append(nodes, node)
|
||||
}
|
||||
|
||||
type trieNode struct {
|
||||
ref Ref
|
||||
mapper valueMapper
|
||||
next *trieNode
|
||||
any *trieNode
|
||||
undefined *trieNode
|
||||
scalars map[Value]*trieNode
|
||||
array *trieNode
|
||||
rules []*ruleNode
|
||||
}
|
||||
|
||||
func (node *trieNode) String() string {
|
||||
var flags []string
|
||||
flags = append(flags, fmt.Sprintf("self:%p", node))
|
||||
if len(node.ref) > 0 {
|
||||
flags = append(flags, node.ref.String())
|
||||
}
|
||||
if node.next != nil {
|
||||
flags = append(flags, fmt.Sprintf("next:%p", node.next))
|
||||
}
|
||||
if node.any != nil {
|
||||
flags = append(flags, fmt.Sprintf("any:%p", node.any))
|
||||
}
|
||||
if node.undefined != nil {
|
||||
flags = append(flags, fmt.Sprintf("undefined:%p", node.undefined))
|
||||
}
|
||||
if node.array != nil {
|
||||
flags = append(flags, fmt.Sprintf("array:%p", node.array))
|
||||
}
|
||||
if len(node.scalars) > 0 {
|
||||
buf := []string{}
|
||||
for k, v := range node.scalars {
|
||||
buf = append(buf, fmt.Sprintf("scalar(%v):%p", k, v))
|
||||
}
|
||||
sort.Strings(buf)
|
||||
flags = append(flags, strings.Join(buf, " "))
|
||||
}
|
||||
if len(node.rules) > 0 {
|
||||
flags = append(flags, fmt.Sprintf("%d rule(s)", len(node.rules)))
|
||||
}
|
||||
if node.mapper != nil {
|
||||
flags = append(flags, "mapper")
|
||||
}
|
||||
return strings.Join(flags, " ")
|
||||
}
|
||||
|
||||
type ruleNode struct {
|
||||
prio [2]int
|
||||
rule *Rule
|
||||
}
|
||||
|
||||
func newTrieNodeImpl() *trieNode {
|
||||
return &trieNode{
|
||||
scalars: map[Value]*trieNode{},
|
||||
}
|
||||
}
|
||||
|
||||
func (node *trieNode) Do(walker trieWalker) {
|
||||
next := walker.Do(node)
|
||||
if next == nil {
|
||||
return
|
||||
}
|
||||
if node.any != nil {
|
||||
node.any.Do(next)
|
||||
}
|
||||
if node.undefined != nil {
|
||||
node.undefined.Do(next)
|
||||
}
|
||||
for _, child := range node.scalars {
|
||||
child.Do(next)
|
||||
}
|
||||
if node.array != nil {
|
||||
node.array.Do(next)
|
||||
}
|
||||
if node.next != nil {
|
||||
node.next.Do(next)
|
||||
}
|
||||
}
|
||||
|
||||
func (node *trieNode) Insert(ref Ref, value Value, mapper valueMapper) *trieNode {
|
||||
|
||||
if node.next == nil {
|
||||
node.next = newTrieNodeImpl()
|
||||
node.next.ref = ref
|
||||
}
|
||||
|
||||
node.next.mapper = mapper
|
||||
|
||||
return node.next.insertValue(value)
|
||||
}
|
||||
|
||||
func (node *trieNode) Traverse(resolver ValueResolver, tr *trieTraversalResult) error {
|
||||
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := range node.rules {
|
||||
tr.Add(node.rules[i])
|
||||
}
|
||||
|
||||
return node.next.traverse(resolver, tr)
|
||||
}
|
||||
|
||||
func (node *trieNode) insertValue(value Value) *trieNode {
|
||||
|
||||
switch value := value.(type) {
|
||||
case nil:
|
||||
if node.undefined == nil {
|
||||
node.undefined = newTrieNodeImpl()
|
||||
}
|
||||
return node.undefined
|
||||
case Var:
|
||||
if node.any == nil {
|
||||
node.any = newTrieNodeImpl()
|
||||
}
|
||||
return node.any
|
||||
case Null, Boolean, Number, String:
|
||||
child, ok := node.scalars[value]
|
||||
if !ok {
|
||||
child = newTrieNodeImpl()
|
||||
node.scalars[value] = child
|
||||
}
|
||||
return child
|
||||
case Array:
|
||||
if node.array == nil {
|
||||
node.array = newTrieNodeImpl()
|
||||
}
|
||||
return node.array.insertArray(value)
|
||||
}
|
||||
|
||||
panic("illegal value")
|
||||
}
|
||||
|
||||
func (node *trieNode) insertArray(arr Array) *trieNode {
|
||||
|
||||
if len(arr) == 0 {
|
||||
return node
|
||||
}
|
||||
|
||||
switch head := arr[0].Value.(type) {
|
||||
case Var:
|
||||
if node.any == nil {
|
||||
node.any = newTrieNodeImpl()
|
||||
}
|
||||
return node.any.insertArray(arr[1:])
|
||||
case Null, Boolean, Number, String:
|
||||
child, ok := node.scalars[head]
|
||||
if !ok {
|
||||
child = newTrieNodeImpl()
|
||||
node.scalars[head] = child
|
||||
}
|
||||
return child.insertArray(arr[1:])
|
||||
}
|
||||
|
||||
panic("illegal value")
|
||||
}
|
||||
|
||||
func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error {
|
||||
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
v, err := resolver.Resolve(node.ref)
|
||||
if err != nil {
|
||||
if IsUnknownValueErr(err) {
|
||||
return node.traverseUnknown(resolver, tr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if node.undefined != nil {
|
||||
node.undefined.Traverse(resolver, tr)
|
||||
}
|
||||
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if node.any != nil {
|
||||
node.any.Traverse(resolver, tr)
|
||||
}
|
||||
|
||||
if node.mapper != nil {
|
||||
v = node.mapper(v)
|
||||
}
|
||||
|
||||
return node.traverseValue(resolver, tr, v)
|
||||
}
|
||||
|
||||
func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalResult, value Value) error {
|
||||
|
||||
switch value := value.(type) {
|
||||
case Array:
|
||||
if node.array == nil {
|
||||
return nil
|
||||
}
|
||||
return node.array.traverseArray(resolver, tr, value)
|
||||
|
||||
case Null, Boolean, Number, String:
|
||||
child, ok := node.scalars[value]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return child.Traverse(resolver, tr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr Array) error {
|
||||
|
||||
if len(arr) == 0 {
|
||||
return node.Traverse(resolver, tr)
|
||||
}
|
||||
|
||||
head := arr[0].Value
|
||||
|
||||
if !IsScalar(head) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if node.any != nil {
|
||||
node.any.traverseArray(resolver, tr, arr[1:])
|
||||
}
|
||||
|
||||
child, ok := node.scalars[head]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return child.traverseArray(resolver, tr, arr[1:])
|
||||
}
|
||||
|
||||
// traverseUnknown collects rules from every branch below node. It is used
// when the resolver reports the value for node.ref as unknown (partial
// evaluation), so no branch can be pruned. A nil receiver is a no-op, which
// lets the possibly-nil branch fields be visited unconditionally.
func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error {

	if node == nil {
		return nil
	}

	if err := node.Traverse(resolver, tr); err != nil {
		return err
	}

	if err := node.undefined.traverseUnknown(resolver, tr); err != nil {
		return err
	}

	if err := node.any.traverseUnknown(resolver, tr); err != nil {
		return err
	}

	if err := node.array.traverseUnknown(resolver, tr); err != nil {
		return err
	}

	for _, child := range node.scalars {
		if err := child.traverseUnknown(resolver, tr); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
type triePrinter struct {
|
||||
depth int
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (p triePrinter) Do(x interface{}) trieWalker {
|
||||
padding := strings.Repeat(" ", p.depth)
|
||||
fmt.Fprintf(p.w, "%v%v\n", padding, x)
|
||||
p.depth++
|
||||
return p
|
||||
}
|
||||
|
||||
func eqOperandsToRefAndValue(isVirtual func(Ref) bool, a, b *Term) (Ref, Value, bool) {
|
||||
|
||||
ref, ok := a.Value.(Ref)
|
||||
if !ok {
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
if !RootDocumentNames.Contains(ref[0]) {
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
if isVirtual(ref) {
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
if ref.IsNested() || !ref.IsGround() {
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
switch b := b.Value.(type) {
|
||||
case Null, Boolean, Number, String, Var:
|
||||
return ref, b, true
|
||||
case Array:
|
||||
stop := false
|
||||
first := true
|
||||
vis := NewGenericVisitor(func(x interface{}) bool {
|
||||
if first {
|
||||
first = false
|
||||
return false
|
||||
}
|
||||
switch x.(type) {
|
||||
// No nested structures or values that require evaluation (other than var).
|
||||
case Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Ref:
|
||||
stop = true
|
||||
}
|
||||
return stop
|
||||
})
|
||||
vis.Walk(b)
|
||||
if !stop {
|
||||
return ref, b, true
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
func globDelimiterToString(delim *Term) (string, bool) {
|
||||
|
||||
arr, ok := delim.Value.(Array)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
|
||||
var result string
|
||||
|
||||
if len(arr) == 0 {
|
||||
result = "."
|
||||
} else {
|
||||
for _, term := range arr {
|
||||
s, ok := term.Value.(String)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
result += string(s)
|
||||
}
|
||||
}
|
||||
|
||||
return result, true
|
||||
}
|
||||
|
||||
func globPatternToArray(pattern *Term, delim string) *Term {
|
||||
|
||||
s, ok := pattern.Value.(String)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
parts := splitStringEscaped(string(s), delim)
|
||||
result := make(Array, len(parts))
|
||||
|
||||
for i := range parts {
|
||||
if parts[i] == "*" {
|
||||
result[i] = VarTerm("$globwildcard")
|
||||
} else {
|
||||
var escaped bool
|
||||
for _, c := range parts[i] {
|
||||
if c == '\\' {
|
||||
escaped = !escaped
|
||||
continue
|
||||
}
|
||||
if !escaped {
|
||||
switch c {
|
||||
case '[', '?', '{', '*':
|
||||
// TODO(tsandall): super glob and character pattern
|
||||
// matching not supported yet.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
escaped = false
|
||||
}
|
||||
result[i] = StringTerm(parts[i])
|
||||
}
|
||||
}
|
||||
|
||||
return NewTerm(result)
|
||||
}
|
||||
|
||||
// splitStringEscaped splits s on characters in delim except where a delim
// character is escaped with a reverse solidus. Escape characters are kept in
// the output; the result always contains at least one element.
func splitStringEscaped(s string, delim string) []string {

	var parts []string
	start := 0
	escaped := false

	for i := 0; i < len(s); i++ {
		c := s[i]
		if escaped {
			// Previous byte was an unconsumed backslash: skip this byte.
			escaped = false
			continue
		}
		if c == '\\' {
			escaped = true
			continue
		}
		if strings.ContainsRune(delim, rune(c)) {
			parts = append(parts, s[start:i])
			start = i + 1
		}
	}

	return append(parts, s[start:])
}
|
||||
|
||||
func stringSliceToArray(s []string) (result Array) {
|
||||
result = make(Array, len(s))
|
||||
for i := range s {
|
||||
result[i] = StringTerm(s[i])
|
||||
}
|
||||
return
|
||||
}
|
||||
133
vendor/github.com/open-policy-agent/opa/ast/map.go
generated
vendored
Normal file
133
vendor/github.com/open-policy-agent/opa/ast/map.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
// Copyright 2016 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/open-policy-agent/opa/util"
|
||||
)
|
||||
|
||||
// ValueMap represents a key/value map between AST term values. Any type of term
|
||||
// can be used as a key in the map.
|
||||
type ValueMap struct {
|
||||
hashMap *util.HashMap
|
||||
}
|
||||
|
||||
// NewValueMap returns a new ValueMap.
|
||||
func NewValueMap() *ValueMap {
|
||||
vs := &ValueMap{
|
||||
hashMap: util.NewHashMap(valueEq, valueHash),
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
// MarshalJSON provides a custom marshaller for the ValueMap which
|
||||
// will include the key, value, and value type.
|
||||
func (vs *ValueMap) MarshalJSON() ([]byte, error) {
|
||||
var tmp []map[string]interface{}
|
||||
vs.Iter(func(k Value, v Value) bool {
|
||||
tmp = append(tmp, map[string]interface{}{
|
||||
"name": k.String(),
|
||||
"type": TypeName(v),
|
||||
"value": v,
|
||||
})
|
||||
return false
|
||||
})
|
||||
return json.Marshal(tmp)
|
||||
}
|
||||
|
||||
// Copy returns a shallow copy of the ValueMap.
|
||||
func (vs *ValueMap) Copy() *ValueMap {
|
||||
if vs == nil {
|
||||
return nil
|
||||
}
|
||||
cpy := NewValueMap()
|
||||
cpy.hashMap = vs.hashMap.Copy()
|
||||
return cpy
|
||||
}
|
||||
|
||||
// Equal returns true if this ValueMap equals the other.
|
||||
func (vs *ValueMap) Equal(other *ValueMap) bool {
|
||||
if vs == nil {
|
||||
return other == nil || other.Len() == 0
|
||||
}
|
||||
if other == nil {
|
||||
return vs == nil || vs.Len() == 0
|
||||
}
|
||||
return vs.hashMap.Equal(other.hashMap)
|
||||
}
|
||||
|
||||
// Len returns the number of elements in the map.
|
||||
func (vs *ValueMap) Len() int {
|
||||
if vs == nil {
|
||||
return 0
|
||||
}
|
||||
return vs.hashMap.Len()
|
||||
}
|
||||
|
||||
// Get returns the value in the map for k.
|
||||
func (vs *ValueMap) Get(k Value) Value {
|
||||
if vs != nil {
|
||||
if v, ok := vs.hashMap.Get(k); ok {
|
||||
return v.(Value)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hash returns a hash code for this ValueMap.
|
||||
func (vs *ValueMap) Hash() int {
|
||||
if vs == nil {
|
||||
return 0
|
||||
}
|
||||
return vs.hashMap.Hash()
|
||||
}
|
||||
|
||||
// Iter calls the iter function for each key/value pair in the map. If the iter
|
||||
// function returns true, iteration stops.
|
||||
func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool {
|
||||
if vs == nil {
|
||||
return false
|
||||
}
|
||||
return vs.hashMap.Iter(func(kt, vt util.T) bool {
|
||||
k := kt.(Value)
|
||||
v := vt.(Value)
|
||||
return iter(k, v)
|
||||
})
|
||||
}
|
||||
|
||||
// Put inserts a key k into the map with value v.
|
||||
func (vs *ValueMap) Put(k, v Value) {
|
||||
if vs == nil {
|
||||
panic("put on nil value map")
|
||||
}
|
||||
vs.hashMap.Put(k, v)
|
||||
}
|
||||
|
||||
// Delete removes a key k from the map.
|
||||
func (vs *ValueMap) Delete(k Value) {
|
||||
if vs == nil {
|
||||
return
|
||||
}
|
||||
vs.hashMap.Delete(k)
|
||||
}
|
||||
|
||||
func (vs *ValueMap) String() string {
|
||||
if vs == nil {
|
||||
return "{}"
|
||||
}
|
||||
return vs.hashMap.String()
|
||||
}
|
||||
|
||||
func valueHash(v util.T) int {
|
||||
return v.(Value).Hash()
|
||||
}
|
||||
|
||||
func valueEq(a, b util.T) bool {
|
||||
av := a.(Value)
|
||||
bv := b.(Value)
|
||||
return av.Compare(bv) == 0
|
||||
}
|
||||
5150
vendor/github.com/open-policy-agent/opa/ast/parser.go
generated
vendored
Normal file
5150
vendor/github.com/open-policy-agent/opa/ast/parser.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
838
vendor/github.com/open-policy-agent/opa/ast/parser_ext.go
generated
vendored
Normal file
838
vendor/github.com/open-policy-agent/opa/ast/parser_ext.go
generated
vendored
Normal file
@@ -0,0 +1,838 @@
|
||||
// Copyright 2016 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file contains extra functions for parsing Rego.
|
||||
// Most of the parsing is handled by the auto-generated code in
|
||||
// parser.go, however, there are additional utilities that are
|
||||
// helpful for dealing with Rego source inputs (e.g., REPL
|
||||
// statements, source files, etc.)
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// MustParseBody returns a parsed body.
|
||||
// If an error occurs during parsing, panic.
|
||||
func MustParseBody(input string) Body {
|
||||
parsed, err := ParseBody(input)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
// MustParseExpr returns a parsed expression.
|
||||
// If an error occurs during parsing, panic.
|
||||
func MustParseExpr(input string) *Expr {
|
||||
parsed, err := ParseExpr(input)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
// MustParseImports returns a slice of imports.
|
||||
// If an error occurs during parsing, panic.
|
||||
func MustParseImports(input string) []*Import {
|
||||
parsed, err := ParseImports(input)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
// MustParseModule returns a parsed module.
|
||||
// If an error occurs during parsing, panic.
|
||||
func MustParseModule(input string) *Module {
|
||||
parsed, err := ParseModule("", input)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
// MustParsePackage returns a Package.
|
||||
// If an error occurs during parsing, panic.
|
||||
func MustParsePackage(input string) *Package {
|
||||
parsed, err := ParsePackage(input)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
// MustParseStatements returns a slice of parsed statements.
|
||||
// If an error occurs during parsing, panic.
|
||||
func MustParseStatements(input string) []Statement {
|
||||
parsed, _, err := ParseStatements("", input)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
// MustParseStatement returns exactly one statement.
|
||||
// If an error occurs during parsing, panic.
|
||||
func MustParseStatement(input string) Statement {
|
||||
parsed, err := ParseStatement(input)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
// MustParseRef returns a parsed reference.
|
||||
// If an error occurs during parsing, panic.
|
||||
func MustParseRef(input string) Ref {
|
||||
parsed, err := ParseRef(input)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
// MustParseRule returns a parsed rule.
|
||||
// If an error occurs during parsing, panic.
|
||||
func MustParseRule(input string) *Rule {
|
||||
parsed, err := ParseRule(input)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
// MustParseTerm returns a parsed term.
|
||||
// If an error occurs during parsing, panic.
|
||||
func MustParseTerm(input string) *Term {
|
||||
parsed, err := ParseTerm(input)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
|
||||
// ParseRuleFromBody returns a rule if the body can be interpreted as a rule
|
||||
// definition. Otherwise, an error is returned.
|
||||
func ParseRuleFromBody(module *Module, body Body) (*Rule, error) {
|
||||
|
||||
if len(body) != 1 {
|
||||
return nil, fmt.Errorf("multiple expressions cannot be used for rule head")
|
||||
}
|
||||
|
||||
return ParseRuleFromExpr(module, body[0])
|
||||
}
|
||||
|
||||
// ParseRuleFromExpr returns a rule if the expression can be interpreted as a
// rule definition. It rejects expressions that cannot form a head (with
// modifiers, negation, some-declarations), then dispatches on the shape of
// the expression: bare ref term, assignment (:=), equality (=), or a
// function-style call.
func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) {

	// with-modified and negated expressions never form rule heads.
	if len(expr.With) > 0 {
		return nil, fmt.Errorf("expressions using with keyword cannot be used for rule head")
	}

	if expr.Negated {
		return nil, fmt.Errorf("negated expressions cannot be used for rule head")
	}

	if _, ok := expr.Terms.(*SomeDecl); ok {
		return nil, errors.New("some declarations cannot be used for rule head")
	}

	// A single term can only define a partial set document (e.g. p[x]).
	if term, ok := expr.Terms.(*Term); ok {
		switch v := term.Value.(type) {
		case Ref:
			return ParsePartialSetDocRuleFromTerm(module, term)
		default:
			return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(v))
		}
	}

	if _, ok := expr.Terms.([]*Term); !ok {
		// This is a defensive check in case other kinds of expression terms are
		// introduced in the future.
		return nil, errors.New("expression cannot be used for rule head")
	}

	if expr.IsAssignment() {

		lhs, rhs := expr.Operand(0), expr.Operand(1)
		rule, err := ParseCompleteDocRuleFromAssignmentExpr(module, lhs, rhs)

		// On failure, report the most specific := misuse we can identify.
		if err == nil {
			return rule, nil
		} else if _, ok := lhs.Value.(Call); ok {
			return nil, errFunctionAssignOperator
		} else if _, ok := lhs.Value.(Ref); ok {
			return nil, errPartialRuleAssignOperator
		}

		return nil, errTermAssignOperator(lhs.Value)
	}

	if expr.IsEquality() {

		lhs, rhs := expr.Operand(0), expr.Operand(1)
		rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)

		if err == nil {
			return rule, nil
		}

		// Not a complete doc; try function definition (f(x) = y), then fall
		// back to a partial object document (p[k] = v).
		rule, err = ParseRuleFromCallEqExpr(module, lhs, rhs)
		if err == nil {
			return rule, nil
		}

		return ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs)
	}

	// Remaining case: a call expression defining a function returning true.
	// Reject names that shadow built-in functions.
	if _, ok := BuiltinMap[expr.Operator().String()]; ok {
		return nil, fmt.Errorf("rule name conflicts with built-in function")
	}

	return ParseRuleFromCallExpr(module, expr.Terms.([]*Term))
}
|
||||
|
||||
// ParseCompleteDocRuleFromAssignmentExpr returns a rule if the expression can
// be interpreted as a complete document definition declared with the assignment
// operator.
func ParseCompleteDocRuleFromAssignmentExpr(module *Module, lhs, rhs *Term) (*Rule, error) {

	// An assignment rule has the same shape as an equality rule; only the
	// head's Assign flag differs.
	rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)
	if err != nil {
		return nil, err
	}

	rule.Head.Assign = true

	return rule, nil
}
|
||||
|
||||
// ParseCompleteDocRuleFromEqExpr returns a rule if the expression can be
|
||||
// interpreted as a complete document definition.
|
||||
func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
|
||||
|
||||
var name Var
|
||||
|
||||
if RootDocumentRefs.Contains(lhs) {
|
||||
name = lhs.Value.(Ref)[0].Value.(Var)
|
||||
} else if v, ok := lhs.Value.(Var); ok {
|
||||
name = v
|
||||
} else {
|
||||
return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(lhs.Value))
|
||||
}
|
||||
|
||||
rule := &Rule{
|
||||
Location: rhs.Location,
|
||||
Head: &Head{
|
||||
Location: rhs.Location,
|
||||
Name: name,
|
||||
Value: rhs,
|
||||
},
|
||||
Body: NewBody(
|
||||
NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location),
|
||||
),
|
||||
Module: module,
|
||||
}
|
||||
|
||||
return rule, nil
|
||||
}
|
||||
|
||||
// ParsePartialObjectDocRuleFromEqExpr returns a rule if the expression can be
|
||||
// interpreted as a partial object document definition.
|
||||
func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
|
||||
|
||||
ref, ok := lhs.Value.(Ref)
|
||||
if !ok || len(ref) != 2 {
|
||||
return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(lhs.Value))
|
||||
}
|
||||
|
||||
name := ref[0].Value.(Var)
|
||||
key := ref[1]
|
||||
|
||||
rule := &Rule{
|
||||
Location: rhs.Location,
|
||||
Head: &Head{
|
||||
Location: rhs.Location,
|
||||
Name: name,
|
||||
Key: key,
|
||||
Value: rhs,
|
||||
},
|
||||
Body: NewBody(
|
||||
NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location),
|
||||
),
|
||||
Module: module,
|
||||
}
|
||||
|
||||
return rule, nil
|
||||
}
|
||||
|
||||
// ParsePartialSetDocRuleFromTerm returns a rule if the term can be interpreted
|
||||
// as a partial set document definition.
|
||||
func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) {
|
||||
|
||||
ref, ok := term.Value.(Ref)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value))
|
||||
}
|
||||
|
||||
if len(ref) != 2 {
|
||||
return nil, fmt.Errorf("refs cannot be used for rule")
|
||||
}
|
||||
|
||||
rule := &Rule{
|
||||
Location: term.Location,
|
||||
Head: &Head{
|
||||
Location: term.Location,
|
||||
Name: ref[0].Value.(Var),
|
||||
Key: ref[1],
|
||||
},
|
||||
Body: NewBody(
|
||||
NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location),
|
||||
),
|
||||
Module: module,
|
||||
}
|
||||
|
||||
return rule, nil
|
||||
}
|
||||
|
||||
// ParseRuleFromCallEqExpr returns a rule if the term can be interpreted as a
|
||||
// function definition (e.g., f(x) = y => f(x) = y { true }).
|
||||
func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
|
||||
|
||||
call, ok := lhs.Value.(Call)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("must be call")
|
||||
}
|
||||
|
||||
rule := &Rule{
|
||||
Location: lhs.Location,
|
||||
Head: &Head{
|
||||
Location: lhs.Location,
|
||||
Name: call[0].Value.(Ref)[0].Value.(Var),
|
||||
Args: Args(call[1:]),
|
||||
Value: rhs,
|
||||
},
|
||||
Body: NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)),
|
||||
Module: module,
|
||||
}
|
||||
|
||||
return rule, nil
|
||||
}
|
||||
|
||||
// ParseRuleFromCallExpr returns a rule if the terms can be interpreted as a
|
||||
// function returning true or some value (e.g., f(x) => f(x) = true { true }).
|
||||
func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) {
|
||||
|
||||
if len(terms) <= 1 {
|
||||
return nil, fmt.Errorf("rule argument list must take at least one argument")
|
||||
}
|
||||
|
||||
loc := terms[0].Location
|
||||
args := terms[1:]
|
||||
value := BooleanTerm(true).SetLocation(loc)
|
||||
|
||||
rule := &Rule{
|
||||
Location: loc,
|
||||
Head: &Head{
|
||||
Location: loc,
|
||||
Name: Var(terms[0].String()),
|
||||
Args: args,
|
||||
Value: value,
|
||||
},
|
||||
Module: module,
|
||||
Body: NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc)),
|
||||
}
|
||||
return rule, nil
|
||||
}
|
||||
|
||||
// ParseImports returns a slice of Import objects.
|
||||
func ParseImports(input string) ([]*Import, error) {
|
||||
stmts, _, err := ParseStatements("", input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := []*Import{}
|
||||
for _, stmt := range stmts {
|
||||
if imp, ok := stmt.(*Import); ok {
|
||||
result = append(result, imp)
|
||||
} else {
|
||||
return nil, fmt.Errorf("expected import but got %T", stmt)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ParseModule returns a parsed Module object.
// For details on Module objects and their fields, see policy.go.
// Empty input will return nil, nil.
func ParseModule(filename, input string) (*Module, error) {
	stmts, comments, err := ParseStatements(filename, input)
	if err != nil {
		return nil, err
	}
	// Assemble the statements into a module (package, imports, rules).
	return parseModule(filename, stmts, comments)
}
|
||||
|
||||
// ParseBody returns exactly one body.
|
||||
// If multiple bodies are parsed, an error is returned.
|
||||
func ParseBody(input string) (Body, error) {
|
||||
stmts, _, err := ParseStatements("", input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := Body{}
|
||||
|
||||
for _, stmt := range stmts {
|
||||
switch stmt := stmt.(type) {
|
||||
case Body:
|
||||
result = append(result, stmt...)
|
||||
case *Comment:
|
||||
// skip
|
||||
default:
|
||||
return nil, fmt.Errorf("expected body but got %T", stmt)
|
||||
}
|
||||
}
|
||||
|
||||
setExprIndices(result)
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ParseExpr returns exactly one expression.
|
||||
// If multiple expressions are parsed, an error is returned.
|
||||
func ParseExpr(input string) (*Expr, error) {
|
||||
body, err := ParseBody(input)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse expression")
|
||||
}
|
||||
if len(body) != 1 {
|
||||
return nil, fmt.Errorf("expected exactly one expression but got: %v", body)
|
||||
}
|
||||
return body[0], nil
|
||||
}
|
||||
|
||||
// ParsePackage returns exactly one Package.
|
||||
// If multiple statements are parsed, an error is returned.
|
||||
func ParsePackage(input string) (*Package, error) {
|
||||
stmt, err := ParseStatement(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pkg, ok := stmt.(*Package)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected package but got %T", stmt)
|
||||
}
|
||||
return pkg, nil
|
||||
}
|
||||
|
||||
// ParseTerm returns exactly one term.
|
||||
// If multiple terms are parsed, an error is returned.
|
||||
func ParseTerm(input string) (*Term, error) {
|
||||
body, err := ParseBody(input)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse term")
|
||||
}
|
||||
if len(body) != 1 {
|
||||
return nil, fmt.Errorf("expected exactly one term but got: %v", body)
|
||||
}
|
||||
term, ok := body[0].Terms.(*Term)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected term but got %v", body[0].Terms)
|
||||
}
|
||||
return term, nil
|
||||
}
|
||||
|
||||
// ParseRef returns exactly one reference.
|
||||
func ParseRef(input string) (Ref, error) {
|
||||
term, err := ParseTerm(input)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse ref")
|
||||
}
|
||||
ref, ok := term.Value.(Ref)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected ref but got %v", term)
|
||||
}
|
||||
return ref, nil
|
||||
}
|
||||
|
||||
// ParseRule returns exactly one rule.
|
||||
// If multiple rules are parsed, an error is returned.
|
||||
func ParseRule(input string) (*Rule, error) {
|
||||
stmts, _, err := ParseStatements("", input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(stmts) != 1 {
|
||||
return nil, fmt.Errorf("expected exactly one statement (rule)")
|
||||
}
|
||||
rule, ok := stmts[0].(*Rule)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected rule but got %T", stmts[0])
|
||||
}
|
||||
return rule, nil
|
||||
}
|
||||
|
||||
// ParseStatement returns exactly one statement.
|
||||
// A statement might be a term, expression, rule, etc. Regardless,
|
||||
// this function expects *exactly* one statement. If multiple
|
||||
// statements are parsed, an error is returned.
|
||||
func ParseStatement(input string) (Statement, error) {
|
||||
stmts, _, err := ParseStatements("", input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(stmts) != 1 {
|
||||
return nil, fmt.Errorf("expected exactly one statement")
|
||||
}
|
||||
return stmts[0], nil
|
||||
}
|
||||
|
||||
// CommentsOption returns a parser option to initialize the comments store within
// the parser. Comments are collected into the map keyed by their
// (file, row, col) position; ParseStatements later drains and sorts it.
func CommentsOption() Option {
	return GlobalStore(commentsKey, map[commentKey]*Comment{})
}
|
||||
|
||||
// commentKey identifies a comment by its source position.
type commentKey struct {
	File string
	Row  int
	Col  int
}

// Compare orders comment keys lexicographically: by file, then row, then
// column. It returns -1, 0, or 1.
func (a commentKey) Compare(other commentKey) int {
	if a.File != other.File {
		if a.File < other.File {
			return -1
		}
		return 1
	}
	if a.Row != other.Row {
		if a.Row < other.Row {
			return -1
		}
		return 1
	}
	if a.Col != other.Col {
		if a.Col < other.Col {
			return -1
		}
		return 1
	}
	return 0
}
|
||||
|
||||
// ParseStatements returns a slice of parsed statements.
// This is the default return value from the parser. It also returns any
// comments collected during the parse, sorted by source position.
func ParseStatements(filename, input string) ([]Statement, []*Comment, error) {

	bs := []byte(input)

	parsed, err := Parse(filename, bs, GlobalStore(filenameKey, filename), CommentsOption())
	if err != nil {
		return nil, nil, formatParserErrors(filename, bs, err)
	}

	var comments []*Comment
	var sl []interface{}
	if p, ok := parsed.(program); ok {
		sl = p.buf
		// Map iteration order is random; sort the keys so comments come back
		// in deterministic (file, row, col) order.
		commentMap := p.comments.(map[commentKey]*Comment)
		commentKeys := []commentKey{}
		for k := range commentMap {
			commentKeys = append(commentKeys, k)
		}
		sort.Slice(commentKeys, func(i, j int) bool {
			return commentKeys[i].Compare(commentKeys[j]) < 0
		})
		for _, k := range commentKeys {
			comments = append(comments, commentMap[k])
		}
	} else {
		sl = parsed.([]interface{})
	}
	stmts := make([]Statement, 0, len(sl))

	for _, x := range sl {
		// Rule productions may yield several rules (e.g. else chains), so
		// flatten them into the statement list.
		if rules, ok := x.([]*Rule); ok {
			for _, rule := range rules {
				stmts = append(stmts, rule)
			}
		} else {
			// Unchecked cast should be safe. A panic indicates grammar is
			// out-of-sync.
			stmts = append(stmts, x.(Statement))
		}
	}

	return stmts, comments, postProcess(filename, stmts)
}
|
||||
|
||||
func formatParserErrors(filename string, bs []byte, err error) error {
|
||||
// Errors returned by the parser are always of type errList and the errList
|
||||
// always contains *parserError.
|
||||
// https://godoc.org/github.com/mna/pigeon#hdr-Error_reporting.
|
||||
errs := err.(errList)
|
||||
r := make(Errors, len(errs))
|
||||
for i, e := range errs {
|
||||
r[i] = formatParserError(filename, bs, e.(*parserError))
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func formatParserError(filename string, bs []byte, e *parserError) *Error {
|
||||
loc := NewLocation(nil, filename, e.pos.line, e.pos.col)
|
||||
inner := e.Inner.Error()
|
||||
idx := strings.Index(inner, "no match found")
|
||||
if idx >= 0 {
|
||||
// Match errors end with "no match found, expected: ...". We do not want to
|
||||
// include ", expected: ..." as it does not provide any value, so truncate the
|
||||
// string here.
|
||||
inner = inner[:idx+14]
|
||||
}
|
||||
err := NewError(ParseErr, loc, inner)
|
||||
err.Details = newParserErrorDetail(bs, e.pos)
|
||||
return err
|
||||
}
|
||||
|
||||
// parseModule assembles parsed statements into a *Module. The first statement
// must be a package declaration; subsequent statements become imports and
// rules (bare bodies are converted to rules where possible). All parse errors
// are accumulated and returned together.
func parseModule(filename string, stmts []Statement, comments []*Comment) (*Module, error) {

	if len(stmts) == 0 {
		return nil, NewError(ParseErr, &Location{File: filename}, "empty module")
	}

	var errs Errors

	_package, ok := stmts[0].(*Package)
	if !ok {
		loc := stmts[0].(Statement).Loc()
		errs = append(errs, NewError(ParseErr, loc, "package expected"))
	}

	mod := &Module{
		Package: _package,
	}

	// The comments slice only holds comments that were not their own statements.
	mod.Comments = append(mod.Comments, comments...)

	for _, stmt := range stmts[1:] {
		switch stmt := stmt.(type) {
		case *Import:
			mod.Imports = append(mod.Imports, stmt)
		case *Rule:
			setRuleModule(stmt, mod)
			mod.Rules = append(mod.Rules, stmt)
		case Body:
			// A bare body at module scope is shorthand for a rule definition.
			rule, err := ParseRuleFromBody(mod, stmt)
			if err != nil {
				errs = append(errs, NewError(ParseErr, stmt[0].Location, err.Error()))
			} else {
				mod.Rules = append(mod.Rules, rule)
			}
		case *Package:
			errs = append(errs, NewError(ParseErr, stmt.Loc(), "unexpected package"))
		case *Comment: // Ignore comments, they're handled above.
		default:
			panic("illegal value") // Indicates grammar is out-of-sync with code.
		}
	}

	if len(errs) == 0 {
		return mod, nil
	}

	return nil, errs
}
|
||||
|
||||
// postProcess rewrites freshly parsed statements in place: root document
// variables are expanded into refs, wildcard variables are given unique
// names, and expression indices are assigned.
// NOTE(review): the filename parameter is currently unused here.
func postProcess(filename string, stmts []Statement) error {

	if err := mangleDataVars(stmts); err != nil {
		return err
	}

	if err := mangleInputVars(stmts); err != nil {
		return err
	}

	mangleWildcards(stmts)
	mangleExprIndices(stmts)

	return nil
}
|
||||
|
||||
// mangleDataVars rewrites occurrences of the default root document variable
// into refs rooted at DefaultRootRef, for every statement in place.
func mangleDataVars(stmts []Statement) error {
	for i := range stmts {
		// A fresh transformer per statement so the skip state does not leak
		// across statements.
		vt := newVarToRefTransformer(DefaultRootDocument.Value.(Var), DefaultRootRef.Copy())
		stmt, err := Transform(vt, stmts[i])
		if err != nil {
			return err
		}
		stmts[i] = stmt.(Statement)
	}
	return nil
}
|
||||
|
||||
// mangleInputVars rewrites occurrences of the input root document variable
// into refs rooted at InputRootRef, for every statement in place.
func mangleInputVars(stmts []Statement) error {
	for i := range stmts {
		// A fresh transformer per statement so the skip state does not leak
		// across statements.
		vt := newVarToRefTransformer(InputRootDocument.Value.(Var), InputRootRef.Copy())
		stmt, err := Transform(vt, stmts[i])
		if err != nil {
			return err
		}
		stmts[i] = stmt.(Statement)
	}
	return nil
}
|
||||
|
||||
func mangleExprIndices(stmts []Statement) {
|
||||
for _, stmt := range stmts {
|
||||
setExprIndices(stmt)
|
||||
}
|
||||
}
|
||||
|
||||
func setExprIndices(x interface{}) {
|
||||
WalkBodies(x, func(b Body) bool {
|
||||
for i, expr := range b {
|
||||
expr.Index = i
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
||||
|
||||
func mangleWildcards(stmts []Statement) {
|
||||
m := &wildcardMangler{}
|
||||
for i := range stmts {
|
||||
stmt, _ := Transform(m, stmts[i])
|
||||
stmts[i] = stmt.(Statement)
|
||||
}
|
||||
}
|
||||
|
||||
type wildcardMangler struct {
|
||||
c int
|
||||
}
|
||||
|
||||
func (m *wildcardMangler) Transform(x interface{}) (interface{}, error) {
|
||||
if term, ok := x.(Var); ok {
|
||||
if term.Equal(Wildcard.Value) {
|
||||
name := fmt.Sprintf("%s%d", WildcardPrefix, m.c)
|
||||
m.c++
|
||||
return Var(name), nil
|
||||
}
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
func setRuleModule(rule *Rule, module *Module) {
|
||||
rule.Module = module
|
||||
if rule.Else != nil {
|
||||
setRuleModule(rule.Else, module)
|
||||
}
|
||||
}
|
||||
|
||||
// varToRefTransformer rewrites occurrences of a variable (orig) into a ref
// (target), while leaving rule names and ref heads untouched.
type varToRefTransformer struct {
	orig   Var
	target Ref
	// skip set to true to avoid recursively processing the result of
	// transformation.
	skip bool
}

// newVarToRefTransformer returns a transformer that replaces orig with target.
func newVarToRefTransformer(orig Var, target Ref) *varToRefTransformer {
	return &varToRefTransformer{
		orig:   orig,
		target: target,
		skip:   false,
	}
}

// Transform implements the transformer interface used by Transform. It
// consumes the one-shot skip flag before inspecting the node.
func (vt *varToRefTransformer) Transform(x interface{}) (interface{}, error) {
	if vt.skip {
		vt.skip = false
		return x, nil
	}
	switch x := x.(type) {
	case *Head:
		// The next AST node will be the rule name (which should not be
		// transformed).
		vt.skip = true
	case Ref:
		// The next AST node will be the ref head (which should not be
		// transformed).
		vt.skip = true
	case Var:
		if x.Equal(vt.orig) {
			// Skip the replacement itself so its head var is not rewritten
			// again.
			vt.skip = true
			return vt.target, nil
		}
	}
	return x, nil
}
|
||||
|
||||
// ParserErrorDetail holds additional details for parser errors.
type ParserErrorDetail struct {
	Line string `json:"line"` // text of the offending source line
	Idx  int    `json:"idx"`  // byte index into Line where the error occurred
}
|
||||
|
||||
func newParserErrorDetail(bs []byte, pos position) *ParserErrorDetail {
|
||||
|
||||
offset := pos.offset
|
||||
|
||||
// Find first non-space character at or before offset position.
|
||||
if offset >= len(bs) {
|
||||
offset = len(bs) - 1
|
||||
} else if offset < 0 {
|
||||
offset = 0
|
||||
}
|
||||
|
||||
for offset > 0 && unicode.IsSpace(rune(bs[offset])) {
|
||||
offset--
|
||||
}
|
||||
|
||||
// Find beginning of line containing offset.
|
||||
begin := offset
|
||||
|
||||
for begin > 0 && !isNewLineChar(bs[begin]) {
|
||||
begin--
|
||||
}
|
||||
|
||||
if isNewLineChar(bs[begin]) {
|
||||
begin++
|
||||
}
|
||||
|
||||
// Find end of line containing offset.
|
||||
end := offset
|
||||
|
||||
for end < len(bs) && !isNewLineChar(bs[end]) {
|
||||
end++
|
||||
}
|
||||
|
||||
if begin > end {
|
||||
begin = end
|
||||
}
|
||||
|
||||
// Extract line and compute index of offset byte in line.
|
||||
line := bs[begin:end]
|
||||
index := offset - begin
|
||||
|
||||
return &ParserErrorDetail{
|
||||
Line: string(line),
|
||||
Idx: index,
|
||||
}
|
||||
}
|
||||
|
||||
// Lines returns the pretty formatted line output for the error details.
|
||||
func (d ParserErrorDetail) Lines() []string {
|
||||
line := strings.TrimLeft(d.Line, "\t") // remove leading tabs
|
||||
tabCount := len(d.Line) - len(line)
|
||||
return []string{line, strings.Repeat(" ", d.Idx-tabCount) + "^"}
|
||||
}
|
||||
|
||||
// isNewLineChar reports whether b is a carriage return or line feed.
func isNewLineChar(b byte) bool {
	switch b {
	case '\r', '\n':
		return true
	}
	return false
}
|
||||
620
vendor/github.com/open-policy-agent/opa/ast/parser_internal.go
generated
vendored
Normal file
620
vendor/github.com/open-policy-agent/opa/ast/parser_internal.go
generated
vendored
Normal file
@@ -0,0 +1,620 @@
|
||||
// Copyright 2018 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// Keys used in the pigeon parser's global store (seeded via GlobalStore in
// ParseStatements and CommentsOption).
const (
	// commentsKey is the global map key for the comments slice.
	commentsKey = "comments"

	// filenameKey is the global map key for the filename.
	filenameKey = "filename"
)
|
||||
|
||||
// program is the top-level value produced by the generated parser: the parsed
// statements plus the comment store accumulated in the global store.
type program struct {
	buf      []interface{} // parsed statements/rules in source order
	comments interface{}   // map[commentKey]*Comment collected during the parse
}

// ruleExt represents a trailing rule extension parsed after a rule body:
// either an additional body for the same head (term == nil) or an "else"
// branch whose value is term (see makeRule).
type ruleExt struct {
	loc  *Location
	term *Term
	body Body
}
|
||||
|
||||
// currentLocation converts the parser context to a Location object.
// The filename is read from the parser's global store, seeded by
// ParseStatements.
func currentLocation(c *current) *Location {
	return NewLocation(c.text, c.globalStore[filenameKey].(string), c.pos.line, c.pos.col)
}
|
||||
|
||||
func makeProgram(c *current, vals interface{}) (interface{}, error) {
|
||||
var buf []interface{}
|
||||
if vals == nil {
|
||||
return buf, nil
|
||||
}
|
||||
ifaceSlice := vals.([]interface{})
|
||||
head := ifaceSlice[0]
|
||||
buf = append(buf, head)
|
||||
for _, tail := range ifaceSlice[1].([]interface{}) {
|
||||
stmt := tail.([]interface{})[1]
|
||||
buf = append(buf, stmt)
|
||||
}
|
||||
return program{buf, c.globalStore[commentsKey]}, nil
|
||||
}
|
||||
|
||||
// makePackage builds a *Package from the parsed package-name term, prefixing
// the path with the default root document. Package names must be ground and
// contain only string components after the head.
func makePackage(loc *Location, value interface{}) (interface{}, error) {
	// All packages are implicitly declared under the default root document.
	term := value.(*Term)
	path := Ref{DefaultRootDocument.Copy().SetLocation(term.Location)}
	switch v := term.Value.(type) {
	case Ref:
		// Convert head of package Ref to String because it will be prefixed
		// with the root document variable.
		head := StringTerm(string(v[0].Value.(Var))).SetLocation(v[0].Location)
		tail := v[1:]
		if !tail.IsGround() {
			return nil, fmt.Errorf("package name cannot contain variables: %v", v)
		}

		// We do not allow non-string values in package names.
		// Because documents are typically represented as JSON, non-string keys are
		// not allowed for now.
		// TODO(tsandall): consider special syntax for namespacing under arrays.
		for _, p := range tail {
			_, ok := p.Value.(String)
			if !ok {
				return nil, fmt.Errorf("package name cannot contain non-string values: %v", v)
			}
		}
		path = append(path, head)
		path = append(path, tail...)
	case Var:
		// Single-component package name.
		s := StringTerm(string(v)).SetLocation(term.Location)
		path = append(path, s)
	}
	pkg := &Package{Location: loc, Path: path}
	return pkg, nil
}
|
||||
|
||||
// makeImport builds an *Import from the parsed path term and the optional
// alias production, validating the import path.
func makeImport(loc *Location, path, alias interface{}) (interface{}, error) {
	imp := &Import{}
	imp.Location = loc
	imp.Path = path.(*Term)
	if err := IsValidImportPath(imp.Path.Value); err != nil {
		return nil, err
	}
	if alias == nil {
		return imp, nil
	}
	aliasSlice := alias.([]interface{})
	// Import definition above describes the "alias" slice. We only care about the "Var" element.
	// (Index 3 is fixed by the grammar's alias production.)
	imp.Alias = aliasSlice[3].(*Term).Value.(Var)
	return imp, nil
}
|
||||
|
||||
// makeDefaultRule builds the rule list for a "default" rule declaration.
// Default rules must use the = operator and their value must be a constant
// (no refs or vars outside comprehension closures), since the default cannot
// depend on data or input.
func makeDefaultRule(loc *Location, name, operator, value interface{}) (interface{}, error) {

	if string(operator.([]uint8)) == Assign.Infix {
		return nil, fmt.Errorf("default rules must use = operator (not := operator)")
	}

	term := value.(*Term)
	var err error

	// Walk the value and reject any ref/var outside of comprehension
	// closures; the first offending node wins.
	vis := NewGenericVisitor(func(x interface{}) bool {
		if err != nil {
			return true
		}
		switch x.(type) {
		case *ArrayComprehension, *ObjectComprehension, *SetComprehension: // skip closures
			return true
		case Ref, Var:
			err = fmt.Errorf("default rule value cannot contain %v", TypeName(x))
			return true
		}
		return false
	})

	vis.Walk(term)

	if err != nil {
		return nil, err
	}

	// The rule body is the trivial "true" expression.
	body := NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)))

	rule := &Rule{
		Location: loc,
		Default:  true,
		Head: &Head{
			Location: loc,
			Name:     name.(*Term).Value.(Var),
			Value:    value.(*Term),
		},
		Body: body,
	}
	rule.Body[0].Location = loc

	return []*Rule{rule}, nil
}
|
||||
|
||||
// makeRule builds the []*Rule for a rule production: the first body plus any
// trailing extensions. An extension without a term (re.term == nil) is an
// additional body for the same head (incremental definition); an extension
// with a term is an "else" branch chained via Rule.Else. Mixing the two, or
// using else on non-complete docs or := rules, is rejected.
func makeRule(loc *Location, head, rest interface{}) (interface{}, error) {

	if head == nil {
		return nil, nil
	}

	sl := rest.([]interface{})

	rules := []*Rule{
		{
			Location: loc,
			Head:     head.(*Head),
			Body:     sl[0].(Body),
		},
	}

	// ordered becomes true once an else branch is seen; after that, only
	// further else branches are allowed.
	var ordered bool
	prev := rules[0]

	for i, elem := range sl[1].([]interface{}) {

		next := elem.([]interface{})
		re := next[1].(ruleExt)

		// Rules declared with := cannot be extended at all.
		if rules[0].Head.Assign {
			return nil, errElseAssignOperator
		}

		if re.term == nil {
			if ordered {
				return nil, fmt.Errorf("expected 'else' keyword")
			}
			// Additional body: a sibling rule with a copy of the same head.
			rules = append(rules, &Rule{
				Location: re.loc,
				Head:     prev.Head.Copy(),
				Body:     re.body,
			})
		} else {
			// else is only valid on complete docs, and once ordered, every
			// subsequent extension must also be an else.
			if (rules[0].Head.DocKind() != CompleteDoc) || (i != 0 && !ordered) {
				return nil, fmt.Errorf("unexpected 'else' keyword")
			}
			ordered = true
			curr := &Rule{
				Location: re.loc,
				Head: &Head{
					Name:     prev.Head.Name,
					Args:     prev.Head.Args.Copy(),
					Value:    re.term,
					Location: re.term.Location,
				},
				Body: re.body,
			}
			prev.Else = curr
			prev = curr
		}
	}

	return rules, nil
}
|
||||
|
||||
// makeRuleHead builds a *Head from the grammar's name, optional argument
// list, optional key, and optional value productions. The fixed indices into
// the arg/key/value slices are dictated by the grammar definition.
func makeRuleHead(loc *Location, name, args, key, value interface{}) (interface{}, error) {

	head := &Head{}

	head.Location = loc
	head.Name = name.(*Term).Value.(Var)

	// A head is either a function (args) or a partial doc (key), never both.
	if args != nil && key != nil {
		return nil, fmt.Errorf("partial rules cannot take arguments")
	}

	if args != nil {
		argSlice := args.([]interface{})
		head.Args = argSlice[3].(Args)
	}

	if key != nil {
		keySlice := key.([]interface{})
		// Head definition above describes the "key" slice. We care about the "Term" element.
		head.Key = keySlice[3].(*Term)
	}

	if value != nil {
		valueSlice := value.([]interface{})
		operator := string(valueSlice[1].([]uint8))

		// := is only permitted on complete docs (no key, no args).
		if operator == Assign.Infix {
			if head.Key != nil {
				return nil, errPartialRuleAssignOperator
			} else if len(head.Args) > 0 {
				return nil, errFunctionAssignOperator
			}
			head.Assign = true
		}

		// Head definition above describes the "value" slice. We care about the "Term" element.
		head.Value = valueSlice[len(valueSlice)-1].(*Term)
	}

	// No key and no value: the head implicitly evaluates to true.
	if key == nil && value == nil {
		head.Value = BooleanTerm(true).SetLocation(head.Location)
	}

	// Partial object docs restrict the key to string/var/ref.
	if key != nil && value != nil {
		switch head.Key.Value.(type) {
		case Var, String, Ref: // nop
		default:
			return nil, fmt.Errorf("object key must be string, var, or ref, not %v", TypeName(head.Key.Value))
		}
	}

	return head, nil
}
|
||||
|
||||
func makeArgs(list interface{}) (interface{}, error) {
|
||||
termSlice := list.([]*Term)
|
||||
args := make(Args, len(termSlice))
|
||||
for i := 0; i < len(args); i++ {
|
||||
args[i] = termSlice[i]
|
||||
}
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// makeRuleExt builds a ruleExt (an "else" extension of a rule) from an
// optional value and a mandatory brace-enclosed body. val and b are raw
// parser slices positioned by the grammar.
func makeRuleExt(loc *Location, val, b interface{}) (interface{}, error) {
	bs := b.([]interface{})
	// Index 1 of the body slice holds the parsed Body.
	body := bs[1].(Body)

	// "else" with no explicit value defaults to true.
	if val == nil {
		term := BooleanTerm(true)
		term.Location = loc
		return ruleExt{term.Location, term, body}, nil
	}

	// Index 3 of the value slice holds the Term following the operator.
	vs := val.([]interface{})
	t := vs[3].(*Term)
	return ruleExt{loc, t, body}, nil
}
|
||||
|
||||
func makeLiteral(negated, value, with interface{}) (interface{}, error) {
|
||||
|
||||
expr := value.(*Expr)
|
||||
|
||||
expr.Negated = negated.(bool)
|
||||
|
||||
if with != nil {
|
||||
expr.With = with.([]*With)
|
||||
}
|
||||
|
||||
return expr, nil
|
||||
}
|
||||
|
||||
// makeLiteralExpr builds an Expr from a parsed term and an optional
// infix remainder. With no remainder, a Call term becomes a call
// expression and any other term becomes a plain term expression.
func makeLiteralExpr(loc *Location, lhs, rest interface{}) (interface{}, error) {

	if rest == nil {
		if call, ok := lhs.(*Term).Value.(Call); ok {
			return NewExpr([]*Term(call)).SetLocation(loc), nil
		}
		return NewExpr(lhs).SetLocation(loc), nil
	}

	// rest is a parser slice: index 1 holds the operator term and
	// index 3 the right-hand operand. Expr terms are ordered
	// [operator, lhs, rhs].
	termSlice := rest.([]interface{})
	terms := []*Term{
		termSlice[1].(*Term),
		lhs.(*Term),
		termSlice[3].(*Term),
	}

	expr := NewExpr(terms).SetLocation(loc)

	return expr, nil
}
|
||||
|
||||
func makeSomeDeclLiteral(loc *Location, sl interface{}) (interface{}, error) {
|
||||
symbols := sl.([]*Term)
|
||||
return NewExpr(&SomeDecl{Location: loc, Symbols: symbols}).SetLocation(loc), nil
|
||||
}
|
||||
|
||||
// makeSomeDeclSymbols collects the symbols of a "some x, y, z"
// declaration into a []*Term. head is the first symbol; rest is the
// parser slice for the remaining ", symbol" groups.
func makeSomeDeclSymbols(head interface{}, rest interface{}) (interface{}, error) {

	var symbols []*Term

	symbols = append(symbols, head.(*Term))

	if sl1, ok := rest.([]interface{}); ok {
		for i := range sl1 {
			if sl2, ok := sl1[i].([]interface{}); ok {
				// Index 3 of each group holds the symbol Term.
				symbols = append(symbols, sl2[3].(*Term))
			}
		}
	}

	return symbols, nil
}
|
||||
|
||||
// makeWithKeywordList collects one or more "with" modifiers into a
// []*With. head is the first modifier (nil when none are present); tail
// is the parser slice holding the remaining modifiers.
func makeWithKeywordList(head, tail interface{}) (interface{}, error) {
	var withs []*With

	// No "with" keyword present: return the nil slice.
	if head == nil {
		return withs, nil
	}

	sl := tail.([]interface{})

	withs = make([]*With, 0, len(sl)+1)
	withs = append(withs, head.(*With))

	for i := range sl {
		withSlice := sl[i].([]interface{})
		// Index 1 of each group holds the *With value.
		withs = append(withs, withSlice[1].(*With))
	}

	return withs, nil
}
|
||||
|
||||
func makeWithKeyword(loc *Location, target, value interface{}) (interface{}, error) {
|
||||
w := &With{
|
||||
Target: target.(*Term),
|
||||
Value: value.(*Term),
|
||||
}
|
||||
return w.SetLocation(loc), nil
|
||||
}
|
||||
|
||||
// makeExprTerm folds a left-associative chain of infix operations into
// nested Call terms. lhs is the first operand; rest holds zero or more
// parser groups with the operator at index 1 and the right operand at
// index 3.
func makeExprTerm(loc *Location, lhs, rest interface{}) (interface{}, error) {

	if rest == nil {
		return lhs, nil
	}

	sl := rest.([]interface{})

	if len(sl) == 0 {
		return lhs, nil
	}

	// Each step wraps the accumulated lhs in a new call op(lhs, rhs).
	for i := range sl {
		termSlice := sl[i].([]interface{})
		call := Call{
			termSlice[1].(*Term),
			lhs.(*Term),
			termSlice[3].(*Term),
		}
		lhs = NewTerm(call).SetLocation(loc)
	}

	return lhs, nil
}
|
||||
|
||||
// makeCall builds a Call term from an operator term and its parsed
// argument list. The resulting Call stores the operator at index 0
// followed by the arguments.
func makeCall(loc *Location, operator, args interface{}) (interface{}, error) {

	termSlice := args.([]*Term)
	termOperator := operator.(*Term)

	call := make(Call, len(termSlice)+1)

	// Normalize a bare variable operator (e.g. f(..)) into a Ref so all
	// call operators are reference-valued.
	if _, ok := termOperator.Value.(Var); ok {
		termOperator = RefTerm(termOperator).SetLocation(loc)
	}

	call[0] = termOperator

	for i := 1; i < len(call); i++ {
		call[i] = termSlice[i-1]
	}

	return NewTerm(call).SetLocation(loc), nil
}
|
||||
|
||||
func makeBraceEnclosedBody(loc *Location, body interface{}) (interface{}, error) {
|
||||
if body != nil {
|
||||
return body, nil
|
||||
}
|
||||
return NewBody(NewExpr(ObjectTerm().SetLocation(loc)).SetLocation(loc)), nil
|
||||
}
|
||||
|
||||
// makeBody assembles a Body from the first expression and the parser
// slice of remaining separator groups. pos is the index within each
// group where the *Expr lives; it differs between grammar rules.
func makeBody(head, tail interface{}, pos int) (interface{}, error) {

	sl := tail.([]interface{})
	body := make(Body, len(sl)+1)
	body[0] = head.(*Expr)

	for i := 1; i < len(body); i++ {
		body[i] = sl[i-1].([]interface{})[pos].(*Expr)
	}

	return body, nil
}
|
||||
|
||||
// makeExprTermList collects a comma-separated term list into a []*Term.
// head is the first term (nil for an empty list); tail is the parser
// slice of ", term" groups with the Term at index 3.
func makeExprTermList(head, tail interface{}) (interface{}, error) {

	var terms []*Term

	// Empty list: return the nil slice.
	if head == nil {
		return terms, nil
	}

	sl := tail.([]interface{})

	terms = make([]*Term, 0, len(sl)+1)
	terms = append(terms, head.(*Term))

	for i := range sl {
		termSlice := sl[i].([]interface{})
		terms = append(terms, termSlice[3].(*Term))
	}

	return terms, nil
}
|
||||
|
||||
// makeExprTermPairList collects comma-separated key/value pairs into a
// [][2]*Term. head is the first pair (nil for an empty list); tail is
// the parser slice of ", pair" groups with the pair at index 3.
func makeExprTermPairList(head, tail interface{}) (interface{}, error) {

	var terms [][2]*Term

	// Empty list: return the nil slice.
	if head == nil {
		return terms, nil
	}

	sl := tail.([]interface{})

	terms = make([][2]*Term, 0, len(sl)+1)
	terms = append(terms, head.([2]*Term))

	for i := range sl {
		termSlice := sl[i].([]interface{})
		terms = append(terms, termSlice[3].([2]*Term))
	}

	return terms, nil
}
|
||||
|
||||
func makeExprTermPair(key, value interface{}) (interface{}, error) {
|
||||
return [2]*Term{key.(*Term), value.(*Term)}, nil
|
||||
}
|
||||
|
||||
func makeInfixOperator(loc *Location, text []byte) (interface{}, error) {
|
||||
op := string(text)
|
||||
for _, b := range Builtins {
|
||||
if string(b.Infix) == op {
|
||||
op = string(b.Name)
|
||||
}
|
||||
}
|
||||
operator := RefTerm(VarTerm(op).SetLocation(loc)).SetLocation(loc)
|
||||
return operator, nil
|
||||
}
|
||||
|
||||
func makeArray(loc *Location, list interface{}) (interface{}, error) {
|
||||
termSlice := list.([]*Term)
|
||||
return ArrayTerm(termSlice...).SetLocation(loc), nil
|
||||
}
|
||||
|
||||
func makeObject(loc *Location, list interface{}) (interface{}, error) {
|
||||
termPairSlice := list.([][2]*Term)
|
||||
return ObjectTerm(termPairSlice...).SetLocation(loc), nil
|
||||
}
|
||||
|
||||
func makeSet(loc *Location, list interface{}) (interface{}, error) {
|
||||
termSlice := list.([]*Term)
|
||||
return SetTerm(termSlice...).SetLocation(loc), nil
|
||||
}
|
||||
|
||||
func makeArrayComprehension(loc *Location, head, body interface{}) (interface{}, error) {
|
||||
return ArrayComprehensionTerm(head.(*Term), body.(Body)).SetLocation(loc), nil
|
||||
}
|
||||
|
||||
func makeSetComprehension(loc *Location, head, body interface{}) (interface{}, error) {
|
||||
return SetComprehensionTerm(head.(*Term), body.(Body)).SetLocation(loc), nil
|
||||
}
|
||||
|
||||
func makeObjectComprehension(loc *Location, head, body interface{}) (interface{}, error) {
|
||||
pair := head.([2]*Term)
|
||||
return ObjectComprehensionTerm(pair[0], pair[1], body.(Body)).SetLocation(loc), nil
|
||||
}
|
||||
|
||||
// makeRef assembles a Ref term from a head term and the slice of parsed
// operand terms. A head with no operands is returned unchanged.
func makeRef(loc *Location, head, rest interface{}) (interface{}, error) {

	headTerm := head.(*Term)
	ifaceSlice := rest.([]interface{})

	if len(ifaceSlice) == 0 {
		return headTerm, nil
	}

	ref := make(Ref, len(ifaceSlice)+1)
	ref[0] = headTerm

	for i := 1; i < len(ref); i++ {
		ref[i] = ifaceSlice[i-1].(*Term)
	}

	return NewTerm(ref).SetLocation(loc), nil
}
|
||||
|
||||
func makeRefOperandDot(loc *Location, val interface{}) (interface{}, error) {
|
||||
return StringTerm(string(val.(*Term).Value.(Var))).SetLocation(loc), nil
|
||||
}
|
||||
|
||||
func makeVar(loc *Location, text interface{}) (interface{}, error) {
|
||||
str := string(text.([]byte))
|
||||
return VarTerm(str).SetLocation(loc), nil
|
||||
}
|
||||
|
||||
// makeNumber parses the matched numeric text into a number term,
// rejecting values whose binary exponent is large enough to make
// big.Float formatting pathologically slow.
func makeNumber(loc *Location, text interface{}) (interface{}, error) {
	f, ok := new(big.Float).SetString(string(text.([]byte)))
	if !ok {
		// This indicates the grammar is out-of-sync with the string
		// representation of floating point numbers. This should not be
		// possible.
		panic("illegal value")
	}

	// Put limit on size of exponent to prevent non-linear cost of String()
	// function on big.Float from causing denial of service: https://github.com/golang/go/issues/11068
	//
	// n == sign * mantissa * 2^exp
	// 0.5 <= mantissa < 1.0
	//
	// The limit is arbitrary.
	exp := f.MantExp(nil)
	if exp > 1e5 || exp < -1e5 {
		return nil, fmt.Errorf("number too big")
	}

	return NumberTerm(json.Number(f.String())).SetLocation(loc), nil
}
|
||||
|
||||
func makeString(loc *Location, text interface{}) (interface{}, error) {
|
||||
var v string
|
||||
err := json.Unmarshal(text.([]byte), &v)
|
||||
return StringTerm(v).SetLocation(loc), err
|
||||
}
|
||||
|
||||
func makeRawString(loc *Location, text interface{}) (interface{}, error) {
|
||||
s := string(text.([]byte))
|
||||
s = s[1 : len(s)-1] // Trim surrounding quotes.
|
||||
return StringTerm(s).SetLocation(loc), nil
|
||||
}
|
||||
|
||||
func makeNonterminatedString(loc *Location, s string) (interface{}, error) {
|
||||
return StringTerm(s).SetLocation(loc), fmt.Errorf("found non-terminated string literal")
|
||||
}
|
||||
|
||||
func makeBool(loc *Location, text interface{}) (interface{}, error) {
|
||||
var term *Term
|
||||
if string(text.([]byte)) == "true" {
|
||||
term = BooleanTerm(true)
|
||||
} else {
|
||||
term = BooleanTerm(false)
|
||||
}
|
||||
return term.SetLocation(loc), nil
|
||||
}
|
||||
|
||||
func makeNull(loc *Location) (interface{}, error) {
|
||||
return NullTerm().SetLocation(loc), nil
|
||||
}
|
||||
|
||||
// makeComments concatenates the matched comment fragments into a single
// Comment, records it in the parser's global comment map keyed by
// location, and returns it.
func makeComments(c *current, text interface{}) (interface{}, error) {

	var buf bytes.Buffer
	for _, x := range text.([]interface{}) {
		buf.Write(x.([]byte))
	}

	comment := NewComment(buf.Bytes())
	comment.Location = currentLocation(c)
	// Keyed by (file, row, col) so re-parsing the same position (e.g.
	// after backtracking) overwrites rather than duplicates the entry.
	// NOTE(review): the backtracking rationale is an assumption — confirm
	// against the grammar.
	comments := c.globalStore[commentsKey].(map[commentKey]*Comment)
	key := commentKey{
		File: comment.Location.File,
		Row:  comment.Location.Row,
		Col:  comment.Location.Col,
	}
	comments[key] = comment
	return comment, nil
}
|
||||
1352
vendor/github.com/open-policy-agent/opa/ast/policy.go
generated
vendored
Normal file
1352
vendor/github.com/open-policy-agent/opa/ast/policy.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
82
vendor/github.com/open-policy-agent/opa/ast/pretty.go
generated
vendored
Normal file
82
vendor/github.com/open-policy-agent/opa/ast/pretty.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
// Copyright 2018 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Pretty writes a pretty representation of the AST rooted at x to w.
//
// This function is intended for debug purposes when inspecting ASTs.
func Pretty(w io.Writer, x interface{}) {
	// depth starts at -1 so the root node prints at indent level 0
	// (Before increments the depth before printing).
	pp := &prettyPrinter{
		depth: -1,
		w:     w,
	}
	NewBeforeAfterVisitor(pp.Before, pp.After).Walk(x)
}
|
||||
|
||||
// prettyPrinter tracks the current indentation depth while the visitor
// walks the AST and writes one line per visited node.
type prettyPrinter struct {
	depth int       // current indent level; adjusted by Before/After
	w     io.Writer // destination for the pretty output
}
|
||||
|
||||
// Before is the pre-visit callback: it prints one line describing the
// node and increments the depth. *Term nodes are transparent wrappers —
// they neither print nor indent. The return value (always false) tells
// the visitor to keep walking into the node.
func (pp *prettyPrinter) Before(x interface{}) bool {
	// Terms do not contribute an indentation level.
	switch x.(type) {
	case *Term:
	default:
		pp.depth++
	}

	switch x := x.(type) {
	case *Term:
		return false
	case Args:
		// Omit empty argument lists entirely.
		if len(x) == 0 {
			return false
		}
		pp.writeType(x)
	case *Expr:
		// Expressions print their type plus flags (negation, index).
		extras := []string{}
		if x.Negated {
			extras = append(extras, "negated")
		}
		extras = append(extras, fmt.Sprintf("index=%d", x.Index))
		pp.writeIndent("%v %v", TypeName(x), strings.Join(extras, " "))
	case Null, Boolean, Number, String, Var:
		// Scalars print their value rather than their type name.
		pp.writeValue(x)
	default:
		pp.writeType(x)
	}
	return false
}
|
||||
|
||||
func (pp *prettyPrinter) After(x interface{}) {
|
||||
switch x.(type) {
|
||||
case *Term:
|
||||
default:
|
||||
pp.depth--
|
||||
}
|
||||
}
|
||||
|
||||
func (pp *prettyPrinter) writeValue(x interface{}) {
|
||||
pp.writeIndent(fmt.Sprint(x))
|
||||
}
|
||||
|
||||
func (pp *prettyPrinter) writeType(x interface{}) {
|
||||
pp.writeIndent(TypeName(x))
|
||||
}
|
||||
|
||||
func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) {
|
||||
pad := strings.Repeat(" ", pp.depth)
|
||||
pp.write(pad+f, a...)
|
||||
}
|
||||
|
||||
func (pp *prettyPrinter) write(f string, a ...interface{}) {
|
||||
fmt.Fprintf(pp.w, f+"\n", a...)
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user