fix devops go vet (#1928)
Signed-off-by: runzexia <runzexia@yunify.com>
This commit is contained in:
14
go.mod
14
go.mod
@@ -65,8 +65,8 @@ require (
|
|||||||
github.com/mholt/certmagic v0.5.1 // indirect
|
github.com/mholt/certmagic v0.5.1 // indirect
|
||||||
github.com/miekg/dns v1.1.9 // indirect
|
github.com/miekg/dns v1.1.9 // indirect
|
||||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
|
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
|
||||||
github.com/onsi/ginkgo v1.10.1
|
github.com/onsi/ginkgo v1.8.0
|
||||||
github.com/onsi/gomega v1.7.0
|
github.com/onsi/gomega v1.5.0
|
||||||
github.com/opencontainers/go-digest v1.0.0-rc1
|
github.com/opencontainers/go-digest v1.0.0-rc1
|
||||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
github.com/opencontainers/image-spec v1.0.1 // indirect
|
||||||
github.com/openshift/api v3.9.0+incompatible // indirect
|
github.com/openshift/api v3.9.0+incompatible // indirect
|
||||||
@@ -91,13 +91,13 @@ require (
|
|||||||
gopkg.in/yaml.v2 v2.2.4
|
gopkg.in/yaml.v2 v2.2.4
|
||||||
istio.io/api v0.0.0-20191111210003-35e06ef8d838
|
istio.io/api v0.0.0-20191111210003-35e06ef8d838
|
||||||
istio.io/client-go v0.0.0-20191113122552-9bd0ba57c3d2
|
istio.io/client-go v0.0.0-20191113122552-9bd0ba57c3d2
|
||||||
k8s.io/api v0.17.0
|
k8s.io/api v0.0.0-20191114100352-16d7abae0d2a
|
||||||
k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833
|
k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833
|
||||||
k8s.io/apimachinery v0.17.0
|
k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb
|
||||||
k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682
|
k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682
|
||||||
k8s.io/client-go v0.17.0
|
k8s.io/client-go v0.0.0-20191114101535-6c5935290e33
|
||||||
k8s.io/code-generator v0.17.0
|
k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894
|
||||||
k8s.io/component-base v0.17.0
|
k8s.io/component-base v0.0.0-20191114102325-35a9586014f7
|
||||||
k8s.io/klog v1.0.0
|
k8s.io/klog v1.0.0
|
||||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a
|
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a
|
||||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f // indirect
|
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f // indirect
|
||||||
|
|||||||
@@ -1,34 +0,0 @@
|
|||||||
/*
|
|
||||||
|
|
||||||
Copyright 2019 The KubeSphere Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
|
|
||||||
*/
|
|
||||||
|
|
||||||
package install
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/emicklei/go-restful"
|
|
||||||
urlruntime "k8s.io/apimachinery/pkg/util/runtime"
|
|
||||||
"kubesphere.io/kubesphere/pkg/apiserver/runtime"
|
|
||||||
"kubesphere.io/kubesphere/pkg/kapis/devops/v1alpha2"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
Install(runtime.Container)
|
|
||||||
}
|
|
||||||
|
|
||||||
func Install(container *restful.Container) {
|
|
||||||
urlruntime.Must(v1alpha2.AddToContainer(container))
|
|
||||||
}
|
|
||||||
@@ -48,7 +48,7 @@ func Test_NoScmPipelineConfig_Discarder(t *testing.T) {
|
|||||||
Description: "for test",
|
Description: "for test",
|
||||||
Jenkinsfile: "node{echo 'hello'}",
|
Jenkinsfile: "node{echo 'hello'}",
|
||||||
Discarder: &devops.DiscarderProperty{
|
Discarder: &devops.DiscarderProperty{
|
||||||
"3", "5",
|
DaysToKeep: "3", NumToKeep: "5",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -56,7 +56,7 @@ func Test_NoScmPipelineConfig_Discarder(t *testing.T) {
|
|||||||
Description: "for test",
|
Description: "for test",
|
||||||
Jenkinsfile: "node{echo 'hello'}",
|
Jenkinsfile: "node{echo 'hello'}",
|
||||||
Discarder: &devops.DiscarderProperty{
|
Discarder: &devops.DiscarderProperty{
|
||||||
"3", "",
|
DaysToKeep: "3", NumToKeep: "",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -64,7 +64,7 @@ func Test_NoScmPipelineConfig_Discarder(t *testing.T) {
|
|||||||
Description: "for test",
|
Description: "for test",
|
||||||
Jenkinsfile: "node{echo 'hello'}",
|
Jenkinsfile: "node{echo 'hello'}",
|
||||||
Discarder: &devops.DiscarderProperty{
|
Discarder: &devops.DiscarderProperty{
|
||||||
"", "21321",
|
DaysToKeep: "", NumToKeep: "21321",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -72,7 +72,7 @@ func Test_NoScmPipelineConfig_Discarder(t *testing.T) {
|
|||||||
Description: "for test",
|
Description: "for test",
|
||||||
Jenkinsfile: "node{echo 'hello'}",
|
Jenkinsfile: "node{echo 'hello'}",
|
||||||
Discarder: &devops.DiscarderProperty{
|
Discarder: &devops.DiscarderProperty{
|
||||||
"", "",
|
DaysToKeep: "", NumToKeep: "",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ func TestFakeS3(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
o, ok := s3.storage["hello"]
|
o, ok := s3.Storage["hello"]
|
||||||
if !ok {
|
if !ok {
|
||||||
t.Fatal("should have hello object")
|
t.Fatal("should have hello object")
|
||||||
}
|
}
|
||||||
@@ -41,7 +41,7 @@ func TestFakeS3(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
_, ok = s3.storage["hello"]
|
_, ok = s3.Storage["hello"]
|
||||||
if ok {
|
if ok {
|
||||||
t.Fatal("should not have hello object")
|
t.Fatal("should not have hello object")
|
||||||
}
|
}
|
||||||
|
|||||||
21
vendor/github.com/MakeNowJust/heredoc/LICENSE
generated
vendored
21
vendor/github.com/MakeNowJust/heredoc/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
|
|||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2014-2017 TSUYUSATO Kitsune
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
|
||||||
all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
THE SOFTWARE.
|
|
||||||
53
vendor/github.com/MakeNowJust/heredoc/README.md
generated
vendored
53
vendor/github.com/MakeNowJust/heredoc/README.md
generated
vendored
@@ -1,53 +0,0 @@
|
|||||||
# heredoc [](https://circleci.com/gh/MakeNowJust/heredoc) [](https://gowalker.org/github.com/MakeNowJust/heredoc)
|
|
||||||
|
|
||||||
## About
|
|
||||||
|
|
||||||
Package heredoc provides the here-document with keeping indent.
|
|
||||||
|
|
||||||
## Install
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ go get github.com/MakeNowJust/heredoc
|
|
||||||
```
|
|
||||||
|
|
||||||
## Import
|
|
||||||
|
|
||||||
```go
|
|
||||||
// usual
|
|
||||||
import "github.com/MakeNowJust/heredoc"
|
|
||||||
// shortcuts
|
|
||||||
import . "github.com/MakeNowJust/heredoc/dot"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Example
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
. "github.com/MakeNowJust/heredoc/dot"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
fmt.Println(D(`
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipisicing elit,
|
|
||||||
sed do eiusmod tempor incididunt ut labore et dolore magna
|
|
||||||
aliqua. Ut enim ad minim veniam, ...
|
|
||||||
`))
|
|
||||||
// Output:
|
|
||||||
// Lorem ipsum dolor sit amet, consectetur adipisicing elit,
|
|
||||||
// sed do eiusmod tempor incididunt ut labore et dolore magna
|
|
||||||
// aliqua. Ut enim ad minim veniam, ...
|
|
||||||
//
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## API Document
|
|
||||||
|
|
||||||
- [Go Walker - github.com/MakeNowJust/heredoc](https://gowalker.org/github.com/MakeNowJust/heredoc)
|
|
||||||
- [Go Walker - github.com/MakeNowJust/heredoc/dot](https://gowalker.org/github.com/MakeNowJust/heredoc/dot)
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
This software is released under the MIT License, see LICENSE.
|
|
||||||
98
vendor/github.com/MakeNowJust/heredoc/heredoc.go
generated
vendored
98
vendor/github.com/MakeNowJust/heredoc/heredoc.go
generated
vendored
@@ -1,98 +0,0 @@
|
|||||||
// Copyright (c) 2014-2017 TSUYUSATO Kitsune
|
|
||||||
// This software is released under the MIT License.
|
|
||||||
// http://opensource.org/licenses/mit-license.php
|
|
||||||
|
|
||||||
// Package heredoc provides creation of here-documents from raw strings.
|
|
||||||
//
|
|
||||||
// Golang supports raw-string syntax.
|
|
||||||
// doc := `
|
|
||||||
// Foo
|
|
||||||
// Bar
|
|
||||||
// `
|
|
||||||
// But raw-string cannot recognize indentation. Thus such content is an indented string, equivalent to
|
|
||||||
// "\n\tFoo\n\tBar\n"
|
|
||||||
// I dont't want this!
|
|
||||||
//
|
|
||||||
// However this problem is solved by package heredoc.
|
|
||||||
// doc := heredoc.Doc(`
|
|
||||||
// Foo
|
|
||||||
// Bar
|
|
||||||
// `)
|
|
||||||
// Is equivalent to
|
|
||||||
// "Foo\nBar\n"
|
|
||||||
package heredoc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
const maxInt = int(^uint(0) >> 1)
|
|
||||||
|
|
||||||
// Doc returns un-indented string as here-document.
|
|
||||||
func Doc(raw string) string {
|
|
||||||
skipFirstLine := false
|
|
||||||
if raw[0] == '\n' {
|
|
||||||
raw = raw[1:]
|
|
||||||
} else {
|
|
||||||
skipFirstLine = true
|
|
||||||
}
|
|
||||||
|
|
||||||
lines := strings.Split(raw, "\n")
|
|
||||||
|
|
||||||
minIndentSize := getMinIndent(lines, skipFirstLine)
|
|
||||||
lines = removeIndentation(lines, minIndentSize, skipFirstLine)
|
|
||||||
|
|
||||||
return strings.Join(lines, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// getMinIndent calculates the minimum indentation in lines, excluding empty lines.
|
|
||||||
func getMinIndent(lines []string, skipFirstLine bool) int {
|
|
||||||
minIndentSize := maxInt
|
|
||||||
|
|
||||||
for i, line := range lines {
|
|
||||||
if i == 0 && skipFirstLine {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
indentSize := 0
|
|
||||||
for _, r := range []rune(line) {
|
|
||||||
if unicode.IsSpace(r) {
|
|
||||||
indentSize += 1
|
|
||||||
} else {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(line) == indentSize {
|
|
||||||
if i == len(lines)-1 && indentSize < minIndentSize {
|
|
||||||
lines[i] = ""
|
|
||||||
}
|
|
||||||
} else if indentSize < minIndentSize {
|
|
||||||
minIndentSize = indentSize
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return minIndentSize
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeIndentation removes n characters from the front of each line in lines.
|
|
||||||
// Skips first line if skipFirstLine is true, skips empty lines.
|
|
||||||
func removeIndentation(lines []string, n int, skipFirstLine bool) []string {
|
|
||||||
for i, line := range lines {
|
|
||||||
if i == 0 && skipFirstLine {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(lines[i]) >= n {
|
|
||||||
lines[i] = line[n:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return lines
|
|
||||||
}
|
|
||||||
|
|
||||||
// Docf returns unindented and formatted string as here-document.
|
|
||||||
// Formatting is done as for fmt.Printf().
|
|
||||||
func Docf(raw string, args ...interface{}) string {
|
|
||||||
return fmt.Sprintf(Doc(raw), args...)
|
|
||||||
}
|
|
||||||
27
vendor/github.com/chai2010/gettext-go/LICENSE
generated
vendored
27
vendor/github.com/chai2010/gettext-go/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
|||||||
Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
39
vendor/github.com/chai2010/gettext-go/gettext/caller.go
generated
vendored
39
vendor/github.com/chai2010/gettext-go/gettext/caller.go
generated
vendored
@@ -1,39 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package gettext
|
|
||||||
|
|
||||||
import (
|
|
||||||
"regexp"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
reInit = regexp.MustCompile(`init·\d+$`) // main.init·1
|
|
||||||
reClosure = regexp.MustCompile(`func·\d+$`) // main.func·001
|
|
||||||
)
|
|
||||||
|
|
||||||
// caller types:
|
|
||||||
// runtime.goexit
|
|
||||||
// runtime.main
|
|
||||||
// main.init
|
|
||||||
// main.main
|
|
||||||
// main.init·1 -> main.init
|
|
||||||
// main.func·001 -> main.func
|
|
||||||
// code.google.com/p/gettext-go/gettext.TestCallerName
|
|
||||||
// ...
|
|
||||||
func callerName(skip int) string {
|
|
||||||
pc, _, _, ok := runtime.Caller(skip)
|
|
||||||
if !ok {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
name := runtime.FuncForPC(pc).Name()
|
|
||||||
if reInit.MatchString(name) {
|
|
||||||
return reInit.ReplaceAllString(name, "init")
|
|
||||||
}
|
|
||||||
if reClosure.MatchString(name) {
|
|
||||||
return reClosure.ReplaceAllString(name, "func")
|
|
||||||
}
|
|
||||||
return name
|
|
||||||
}
|
|
||||||
66
vendor/github.com/chai2010/gettext-go/gettext/doc.go
generated
vendored
66
vendor/github.com/chai2010/gettext-go/gettext/doc.go
generated
vendored
@@ -1,66 +0,0 @@
|
|||||||
// Copyright 2013 <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package gettext implements a basic GNU's gettext library.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
import (
|
|
||||||
"github.com/chai2010/gettext-go/gettext"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
gettext.SetLocale("zh_CN")
|
|
||||||
gettext.Textdomain("hello")
|
|
||||||
|
|
||||||
// gettext.BindTextdomain("hello", "local", nil) // from local dir
|
|
||||||
// gettext.BindTextdomain("hello", "local.zip", nil) // from local zip file
|
|
||||||
// gettext.BindTextdomain("hello", "local.zip", zipData) // from embedded zip data
|
|
||||||
|
|
||||||
gettext.BindTextdomain("hello", "local", nil)
|
|
||||||
|
|
||||||
// translate source text
|
|
||||||
fmt.Println(gettext.Gettext("Hello, world!"))
|
|
||||||
// Output: 你好, 世界!
|
|
||||||
|
|
||||||
// translate resource
|
|
||||||
fmt.Println(string(gettext.Getdata("poems.txt")))
|
|
||||||
// Output: ...
|
|
||||||
}
|
|
||||||
|
|
||||||
Translate directory struct("../examples/local.zip"):
|
|
||||||
|
|
||||||
Root: "path" or "file.zip/zipBaseName"
|
|
||||||
+-default # local: $(LC_MESSAGES) or $(LANG) or "default"
|
|
||||||
| +-LC_MESSAGES # just for `gettext.Gettext`
|
|
||||||
| | +-hello.mo # $(Root)/$(local)/LC_MESSAGES/$(domain).mo
|
|
||||||
| | \-hello.po # $(Root)/$(local)/LC_MESSAGES/$(domain).mo
|
|
||||||
| |
|
|
||||||
| \-LC_RESOURCE # just for `gettext.Getdata`
|
|
||||||
| +-hello # domain map a dir in resource translate
|
|
||||||
| +-favicon.ico # $(Root)/$(local)/LC_RESOURCE/$(domain)/$(filename)
|
|
||||||
| \-poems.txt
|
|
||||||
|
|
|
||||||
\-zh_CN # simple chinese translate
|
|
||||||
+-LC_MESSAGES
|
|
||||||
| +-hello.mo # try "$(domain).mo" first
|
|
||||||
| \-hello.po # try "$(domain).po" second
|
|
||||||
|
|
|
||||||
\-LC_RESOURCE
|
|
||||||
+-hello
|
|
||||||
+-favicon.ico # try "$(local)/$(domain)/file" first
|
|
||||||
\-poems.txt # try "default/$(domain)/file" second
|
|
||||||
|
|
||||||
See:
|
|
||||||
http://en.wikipedia.org/wiki/Gettext
|
|
||||||
http://www.gnu.org/software/gettext/manual/html_node
|
|
||||||
http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html
|
|
||||||
http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html
|
|
||||||
http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html
|
|
||||||
http://www.poedit.net/
|
|
||||||
|
|
||||||
Please report bugs to <chaishushan{AT}gmail.com>.
|
|
||||||
Thanks!
|
|
||||||
*/
|
|
||||||
package gettext
|
|
||||||
119
vendor/github.com/chai2010/gettext-go/gettext/domain.go
generated
vendored
119
vendor/github.com/chai2010/gettext-go/gettext/domain.go
generated
vendored
@@ -1,119 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package gettext
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
type domainManager struct {
|
|
||||||
mutex sync.Mutex
|
|
||||||
locale string
|
|
||||||
domain string
|
|
||||||
domainMap map[string]*fileSystem
|
|
||||||
trTextMap map[string]*translator
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDomainManager() *domainManager {
|
|
||||||
return &domainManager{
|
|
||||||
locale: DefaultLocale,
|
|
||||||
domainMap: make(map[string]*fileSystem),
|
|
||||||
trTextMap: make(map[string]*translator),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *domainManager) makeTrMapKey(domain, locale string) string {
|
|
||||||
return domain + "_$$$_" + locale
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *domainManager) Bind(domain, path string, data []byte) (domains, paths []string) {
|
|
||||||
p.mutex.Lock()
|
|
||||||
defer p.mutex.Unlock()
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case domain != "" && path != "": // bind new domain
|
|
||||||
p.bindDomainTranslators(domain, path, data)
|
|
||||||
case domain != "" && path == "": // delete domain
|
|
||||||
p.deleteDomain(domain)
|
|
||||||
}
|
|
||||||
|
|
||||||
// return all bind domain
|
|
||||||
for k, fs := range p.domainMap {
|
|
||||||
domains = append(domains, k)
|
|
||||||
paths = append(paths, fs.FsName)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *domainManager) SetLocale(locale string) string {
|
|
||||||
p.mutex.Lock()
|
|
||||||
defer p.mutex.Unlock()
|
|
||||||
if locale != "" {
|
|
||||||
p.locale = locale
|
|
||||||
}
|
|
||||||
return p.locale
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *domainManager) SetDomain(domain string) string {
|
|
||||||
p.mutex.Lock()
|
|
||||||
defer p.mutex.Unlock()
|
|
||||||
if domain != "" {
|
|
||||||
p.domain = domain
|
|
||||||
}
|
|
||||||
return p.domain
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *domainManager) Getdata(name string) []byte {
|
|
||||||
return p.getdata(p.domain, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *domainManager) DGetdata(domain, name string) []byte {
|
|
||||||
return p.getdata(domain, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *domainManager) PNGettext(msgctxt, msgid, msgidPlural string, n int) string {
|
|
||||||
p.mutex.Lock()
|
|
||||||
defer p.mutex.Unlock()
|
|
||||||
return p.gettext(p.domain, msgctxt, msgid, msgidPlural, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *domainManager) DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string {
|
|
||||||
p.mutex.Lock()
|
|
||||||
defer p.mutex.Unlock()
|
|
||||||
return p.gettext(domain, msgctxt, msgid, msgidPlural, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *domainManager) gettext(domain, msgctxt, msgid, msgidPlural string, n int) string {
|
|
||||||
if p.locale == "" || p.domain == "" {
|
|
||||||
return msgid
|
|
||||||
}
|
|
||||||
if _, ok := p.domainMap[domain]; !ok {
|
|
||||||
return msgid
|
|
||||||
}
|
|
||||||
if f, ok := p.trTextMap[p.makeTrMapKey(domain, p.locale)]; ok {
|
|
||||||
return f.PNGettext(msgctxt, msgid, msgidPlural, n)
|
|
||||||
}
|
|
||||||
return msgid
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *domainManager) getdata(domain, name string) []byte {
|
|
||||||
if p.locale == "" || p.domain == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if _, ok := p.domainMap[domain]; !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if fs, ok := p.domainMap[domain]; ok {
|
|
||||||
if data, err := fs.LoadResourceFile(domain, p.locale, name); err == nil {
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
if p.locale != "default" {
|
|
||||||
if data, err := fs.LoadResourceFile(domain, "default", name); err == nil {
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
50
vendor/github.com/chai2010/gettext-go/gettext/domain_helper.go
generated
vendored
50
vendor/github.com/chai2010/gettext-go/gettext/domain_helper.go
generated
vendored
@@ -1,50 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package gettext
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (p *domainManager) bindDomainTranslators(domain, path string, data []byte) {
|
|
||||||
if _, ok := p.domainMap[domain]; ok {
|
|
||||||
p.deleteDomain(domain) // delete old domain
|
|
||||||
}
|
|
||||||
fs := newFileSystem(path, data)
|
|
||||||
for locale, _ := range fs.LocaleMap {
|
|
||||||
trMapKey := p.makeTrMapKey(domain, locale)
|
|
||||||
if data, err := fs.LoadMessagesFile(domain, locale, ".mo"); err == nil {
|
|
||||||
p.trTextMap[trMapKey], _ = newMoTranslator(
|
|
||||||
fmt.Sprintf("%s_%s.mo", domain, locale),
|
|
||||||
data,
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if data, err := fs.LoadMessagesFile(domain, locale, ".po"); err == nil {
|
|
||||||
p.trTextMap[trMapKey], _ = newPoTranslator(
|
|
||||||
fmt.Sprintf("%s_%s.po", domain, locale),
|
|
||||||
data,
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
p.trTextMap[p.makeTrMapKey(domain, locale)] = nilTranslator
|
|
||||||
}
|
|
||||||
p.domainMap[domain] = fs
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *domainManager) deleteDomain(domain string) {
|
|
||||||
if _, ok := p.domainMap[domain]; !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// delete all mo files
|
|
||||||
trMapKeyPrefix := p.makeTrMapKey(domain, "")
|
|
||||||
for k, _ := range p.trTextMap {
|
|
||||||
if strings.HasPrefix(k, trMapKeyPrefix) {
|
|
||||||
delete(p.trTextMap, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delete(p.domainMap, domain)
|
|
||||||
}
|
|
||||||
187
vendor/github.com/chai2010/gettext-go/gettext/fs.go
generated
vendored
187
vendor/github.com/chai2010/gettext-go/gettext/fs.go
generated
vendored
@@ -1,187 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package gettext
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/zip"
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type fileSystem struct {
|
|
||||||
FsName string
|
|
||||||
FsRoot string
|
|
||||||
FsZipData []byte
|
|
||||||
LocaleMap map[string]bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newFileSystem(path string, data []byte) *fileSystem {
|
|
||||||
fs := &fileSystem{
|
|
||||||
FsName: path,
|
|
||||||
FsZipData: data,
|
|
||||||
}
|
|
||||||
if err := fs.init(); err != nil {
|
|
||||||
log.Printf("gettext-go: invalid domain, err = %v", err)
|
|
||||||
}
|
|
||||||
return fs
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *fileSystem) init() error {
|
|
||||||
zipName := func(name string) string {
|
|
||||||
if x := strings.LastIndexAny(name, `\/`); x != -1 {
|
|
||||||
name = name[x+1:]
|
|
||||||
}
|
|
||||||
name = strings.TrimSuffix(name, ".zip")
|
|
||||||
return name
|
|
||||||
}
|
|
||||||
|
|
||||||
// zip data
|
|
||||||
if len(p.FsZipData) != 0 {
|
|
||||||
p.FsRoot = zipName(p.FsName)
|
|
||||||
p.LocaleMap = p.lsZip(p.FsZipData)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// local dir or zip file
|
|
||||||
fi, err := os.Stat(p.FsName)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// local dir
|
|
||||||
if fi.IsDir() {
|
|
||||||
p.FsRoot = p.FsName
|
|
||||||
p.LocaleMap = p.lsDir(p.FsName)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// local zip file
|
|
||||||
p.FsZipData, err = ioutil.ReadFile(p.FsName)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
p.FsRoot = zipName(p.FsName)
|
|
||||||
p.LocaleMap = p.lsZip(p.FsZipData)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *fileSystem) LoadMessagesFile(domain, local, ext string) ([]byte, error) {
|
|
||||||
if len(p.FsZipData) == 0 {
|
|
||||||
trName := p.makeMessagesFileName(domain, local, ext)
|
|
||||||
rcData, err := ioutil.ReadFile(trName)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return rcData, nil
|
|
||||||
} else {
|
|
||||||
r, err := zip.NewReader(bytes.NewReader(p.FsZipData), int64(len(p.FsZipData)))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
trName := p.makeMessagesFileName(domain, local, ext)
|
|
||||||
for _, f := range r.File {
|
|
||||||
if f.Name != trName {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
rc, err := f.Open()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
rcData, err := ioutil.ReadAll(rc)
|
|
||||||
rc.Close()
|
|
||||||
return rcData, err
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("not found")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *fileSystem) LoadResourceFile(domain, local, name string) ([]byte, error) {
|
|
||||||
if len(p.FsZipData) == 0 {
|
|
||||||
rcName := p.makeResourceFileName(domain, local, name)
|
|
||||||
rcData, err := ioutil.ReadFile(rcName)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return rcData, nil
|
|
||||||
} else {
|
|
||||||
r, err := zip.NewReader(bytes.NewReader(p.FsZipData), int64(len(p.FsZipData)))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
rcName := p.makeResourceFileName(domain, local, name)
|
|
||||||
for _, f := range r.File {
|
|
||||||
if f.Name != rcName {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
rc, err := f.Open()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
rcData, err := ioutil.ReadAll(rc)
|
|
||||||
rc.Close()
|
|
||||||
return rcData, err
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("not found")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *fileSystem) makeMessagesFileName(domain, local, ext string) string {
|
|
||||||
return fmt.Sprintf("%s/%s/LC_MESSAGES/%s%s", p.FsRoot, local, domain, ext)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *fileSystem) makeResourceFileName(domain, local, name string) string {
|
|
||||||
return fmt.Sprintf("%s/%s/LC_RESOURCE/%s/%s", p.FsRoot, local, domain, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *fileSystem) lsZip(data []byte) map[string]bool {
|
|
||||||
r, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
ssMap := make(map[string]bool)
|
|
||||||
for _, f := range r.File {
|
|
||||||
if x := strings.Index(f.Name, "LC_MESSAGES"); x != -1 {
|
|
||||||
s := strings.TrimRight(f.Name[:x], `\/`)
|
|
||||||
if x = strings.LastIndexAny(s, `\/`); x != -1 {
|
|
||||||
s = s[x+1:]
|
|
||||||
}
|
|
||||||
if s != "" {
|
|
||||||
ssMap[s] = true
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if x := strings.Index(f.Name, "LC_RESOURCE"); x != -1 {
|
|
||||||
s := strings.TrimRight(f.Name[:x], `\/`)
|
|
||||||
if x = strings.LastIndexAny(s, `\/`); x != -1 {
|
|
||||||
s = s[x+1:]
|
|
||||||
}
|
|
||||||
if s != "" {
|
|
||||||
ssMap[s] = true
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ssMap
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *fileSystem) lsDir(path string) map[string]bool {
|
|
||||||
list, err := ioutil.ReadDir(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
ssMap := make(map[string]bool)
|
|
||||||
for _, dir := range list {
|
|
||||||
if dir.IsDir() {
|
|
||||||
ssMap[dir.Name()] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ssMap
|
|
||||||
}
|
|
||||||
184
vendor/github.com/chai2010/gettext-go/gettext/gettext.go
generated
vendored
184
vendor/github.com/chai2010/gettext-go/gettext/gettext.go
generated
vendored
@@ -1,184 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package gettext
|
|
||||||
|
|
||||||
var (
|
|
||||||
defaultManager = newDomainManager()
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
DefaultLocale = getDefaultLocale() // use $(LC_MESSAGES) or $(LANG) or "default"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SetLocale sets and queries the program's current locale.
|
|
||||||
//
|
|
||||||
// If the locale is not empty string, set the new local.
|
|
||||||
//
|
|
||||||
// If the locale is empty string, don't change anything.
|
|
||||||
//
|
|
||||||
// Returns is the current locale.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// SetLocale("") // get locale: return DefaultLocale
|
|
||||||
// SetLocale("zh_CN") // set locale: return zh_CN
|
|
||||||
// SetLocale("") // get locale: return zh_CN
|
|
||||||
func SetLocale(locale string) string {
|
|
||||||
return defaultManager.SetLocale(locale)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BindTextdomain sets and queries program's domains.
|
|
||||||
//
|
|
||||||
// If the domain and path are all not empty string, bind the new domain.
|
|
||||||
// If the domain already exists, return error.
|
|
||||||
//
|
|
||||||
// If the domain is not empty string, but the path is the empty string,
|
|
||||||
// delete the domain.
|
|
||||||
// If the domain don't exists, return error.
|
|
||||||
//
|
|
||||||
// If the domain and the path are all empty string, don't change anything.
|
|
||||||
//
|
|
||||||
// Returns is the all bind domains.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// BindTextdomain("poedit", "local", nil) // bind "poedit" domain
|
|
||||||
// BindTextdomain("", "", nil) // return all domains
|
|
||||||
// BindTextdomain("poedit", "", nil) // delete "poedit" domain
|
|
||||||
// BindTextdomain("", "", nil) // return all domains
|
|
||||||
//
|
|
||||||
// Use zip file:
|
|
||||||
// BindTextdomain("poedit", "local.zip", nil) // bind "poedit" domain
|
|
||||||
// BindTextdomain("poedit", "local.zip", zipData) // bind "poedit" domain
|
|
||||||
//
|
|
||||||
func BindTextdomain(domain, path string, zipData []byte) (domains, paths []string) {
|
|
||||||
return defaultManager.Bind(domain, path, zipData)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Textdomain sets and retrieves the current message domain.
|
|
||||||
//
|
|
||||||
// If the domain is not empty string, set the new domains.
|
|
||||||
//
|
|
||||||
// If the domain is empty string, don't change anything.
|
|
||||||
//
|
|
||||||
// Returns is the all used domains.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// Textdomain("poedit") // set domain: poedit
|
|
||||||
// Textdomain("") // get domain: return poedit
|
|
||||||
func Textdomain(domain string) string {
|
|
||||||
return defaultManager.SetDomain(domain)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gettext attempt to translate a text string into the user's native language,
|
|
||||||
// by looking up the translation in a message catalog.
|
|
||||||
//
|
|
||||||
// It use the caller's function name as the msgctxt.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// func Foo() {
|
|
||||||
// msg := gettext.Gettext("Hello") // msgctxt is "some/package/name.Foo"
|
|
||||||
// }
|
|
||||||
func Gettext(msgid string) string {
|
|
||||||
return PGettext(callerName(2), msgid)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Getdata attempt to translate a resource file into the user's native language,
|
|
||||||
// by looking up the translation in a message catalog.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// func Foo() {
|
|
||||||
// Textdomain("hello")
|
|
||||||
// BindTextdomain("hello", "local.zip", nilOrZipData)
|
|
||||||
// poems := gettext.Getdata("poems.txt")
|
|
||||||
// }
|
|
||||||
func Getdata(name string) []byte {
|
|
||||||
return defaultManager.Getdata(name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NGettext attempt to translate a text string into the user's native language,
|
|
||||||
// by looking up the appropriate plural form of the translation in a message
|
|
||||||
// catalog.
|
|
||||||
//
|
|
||||||
// It use the caller's function name as the msgctxt.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// func Foo() {
|
|
||||||
// msg := gettext.NGettext("%d people", "%d peoples", 2)
|
|
||||||
// }
|
|
||||||
func NGettext(msgid, msgidPlural string, n int) string {
|
|
||||||
return PNGettext(callerName(2), msgid, msgidPlural, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PGettext attempt to translate a text string into the user's native language,
|
|
||||||
// by looking up the translation in a message catalog.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// func Foo() {
|
|
||||||
// msg := gettext.PGettext("gettext-go.example", "Hello") // msgctxt is "gettext-go.example"
|
|
||||||
// }
|
|
||||||
func PGettext(msgctxt, msgid string) string {
|
|
||||||
return PNGettext(msgctxt, msgid, "", 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PNGettext attempt to translate a text string into the user's native language,
|
|
||||||
// by looking up the appropriate plural form of the translation in a message
|
|
||||||
// catalog.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// func Foo() {
|
|
||||||
// msg := gettext.PNGettext("gettext-go.example", "%d people", "%d peoples", 2)
|
|
||||||
// }
|
|
||||||
func PNGettext(msgctxt, msgid, msgidPlural string, n int) string {
|
|
||||||
return defaultManager.PNGettext(msgctxt, msgid, msgidPlural, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DGettext like Gettext(), but looking up the message in the specified domain.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// func Foo() {
|
|
||||||
// msg := gettext.DGettext("poedit", "Hello")
|
|
||||||
// }
|
|
||||||
func DGettext(domain, msgid string) string {
|
|
||||||
return DPGettext(domain, callerName(2), msgid)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DNGettext like NGettext(), but looking up the message in the specified domain.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// func Foo() {
|
|
||||||
// msg := gettext.PNGettext("poedit", "gettext-go.example", "%d people", "%d peoples", 2)
|
|
||||||
// }
|
|
||||||
func DNGettext(domain, msgid, msgidPlural string, n int) string {
|
|
||||||
return DPNGettext(domain, callerName(2), msgid, msgidPlural, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DPGettext like PGettext(), but looking up the message in the specified domain.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// func Foo() {
|
|
||||||
// msg := gettext.DPGettext("poedit", "gettext-go.example", "Hello")
|
|
||||||
// }
|
|
||||||
func DPGettext(domain, msgctxt, msgid string) string {
|
|
||||||
return DPNGettext(domain, msgctxt, msgid, "", 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DPNGettext like PNGettext(), but looking up the message in the specified domain.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// func Foo() {
|
|
||||||
// msg := gettext.DPNGettext("poedit", "gettext-go.example", "%d people", "%d peoples", 2)
|
|
||||||
// }
|
|
||||||
func DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string {
|
|
||||||
return defaultManager.DPNGettext(domain, msgctxt, msgid, msgidPlural, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DGetdata like Getdata(), but looking up the resource in the specified domain.
|
|
||||||
//
|
|
||||||
// Examples:
|
|
||||||
// func Foo() {
|
|
||||||
// msg := gettext.DGetdata("hello", "poems.txt")
|
|
||||||
// }
|
|
||||||
func DGetdata(domain, name string) []byte {
|
|
||||||
return defaultManager.DGetdata(domain, name)
|
|
||||||
}
|
|
||||||
34
vendor/github.com/chai2010/gettext-go/gettext/local.go
generated
vendored
34
vendor/github.com/chai2010/gettext-go/gettext/local.go
generated
vendored
@@ -1,34 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package gettext
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getDefaultLocale() string {
|
|
||||||
if v := os.Getenv("LC_MESSAGES"); v != "" {
|
|
||||||
return simplifiedLocale(v)
|
|
||||||
}
|
|
||||||
if v := os.Getenv("LANG"); v != "" {
|
|
||||||
return simplifiedLocale(v)
|
|
||||||
}
|
|
||||||
return "default"
|
|
||||||
}
|
|
||||||
|
|
||||||
func simplifiedLocale(lang string) string {
|
|
||||||
// en_US/en_US.UTF-8/zh_CN/zh_TW/el_GR@euro/...
|
|
||||||
if idx := strings.Index(lang, ":"); idx != -1 {
|
|
||||||
lang = lang[:idx]
|
|
||||||
}
|
|
||||||
if idx := strings.Index(lang, "@"); idx != -1 {
|
|
||||||
lang = lang[:idx]
|
|
||||||
}
|
|
||||||
if idx := strings.Index(lang, "."); idx != -1 {
|
|
||||||
lang = lang[:idx]
|
|
||||||
}
|
|
||||||
return strings.TrimSpace(lang)
|
|
||||||
}
|
|
||||||
74
vendor/github.com/chai2010/gettext-go/gettext/mo/doc.go
generated
vendored
74
vendor/github.com/chai2010/gettext-go/gettext/mo/doc.go
generated
vendored
@@ -1,74 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package mo provides support for reading and writing GNU MO file.
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
import (
|
|
||||||
"github.com/chai2010/gettext-go/gettext/mo"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
moFile, err := mo.Load("test.mo")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
fmt.Printf("%v", moFile)
|
|
||||||
}
|
|
||||||
|
|
||||||
GNU MO file struct:
|
|
||||||
|
|
||||||
byte
|
|
||||||
+------------------------------------------+
|
|
||||||
0 | magic number = 0x950412de |
|
|
||||||
| |
|
|
||||||
4 | file format revision = 0 |
|
|
||||||
| |
|
|
||||||
8 | number of strings | == N
|
|
||||||
| |
|
|
||||||
12 | offset of table with original strings | == O
|
|
||||||
| |
|
|
||||||
16 | offset of table with translation strings | == T
|
|
||||||
| |
|
|
||||||
20 | size of hashing table | == S
|
|
||||||
| |
|
|
||||||
24 | offset of hashing table | == H
|
|
||||||
| |
|
|
||||||
. .
|
|
||||||
. (possibly more entries later) .
|
|
||||||
. .
|
|
||||||
| |
|
|
||||||
O | length & offset 0th string ----------------.
|
|
||||||
O + 8 | length & offset 1st string ------------------.
|
|
||||||
... ... | |
|
|
||||||
O + ((N-1)*8)| length & offset (N-1)th string | | |
|
|
||||||
| | | |
|
|
||||||
T | length & offset 0th translation ---------------.
|
|
||||||
T + 8 | length & offset 1st translation -----------------.
|
|
||||||
... ... | | | |
|
|
||||||
T + ((N-1)*8)| length & offset (N-1)th translation | | | | |
|
|
||||||
| | | | | |
|
|
||||||
H | start hash table | | | | |
|
|
||||||
... ... | | | |
|
|
||||||
H + S * 4 | end hash table | | | | |
|
|
||||||
| | | | | |
|
|
||||||
| NUL terminated 0th string <----------------' | | |
|
|
||||||
| | | | |
|
|
||||||
| NUL terminated 1st string <------------------' | |
|
|
||||||
| | | |
|
|
||||||
... ... | |
|
|
||||||
| | | |
|
|
||||||
| NUL terminated 0th translation <---------------' |
|
|
||||||
| | |
|
|
||||||
| NUL terminated 1st translation <-----------------'
|
|
||||||
| |
|
|
||||||
... ...
|
|
||||||
| |
|
|
||||||
+------------------------------------------+
|
|
||||||
|
|
||||||
The GNU MO file specification is at
|
|
||||||
http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html.
|
|
||||||
*/
|
|
||||||
package mo
|
|
||||||
124
vendor/github.com/chai2010/gettext-go/gettext/mo/encoder.go
generated
vendored
124
vendor/github.com/chai2010/gettext-go/gettext/mo/encoder.go
generated
vendored
@@ -1,124 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package mo
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type moHeader struct {
|
|
||||||
MagicNumber uint32
|
|
||||||
MajorVersion uint16
|
|
||||||
MinorVersion uint16
|
|
||||||
MsgIdCount uint32
|
|
||||||
MsgIdOffset uint32
|
|
||||||
MsgStrOffset uint32
|
|
||||||
HashSize uint32
|
|
||||||
HashOffset uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
type moStrPos struct {
|
|
||||||
Size uint32 // must keep fields order
|
|
||||||
Addr uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeFile(f *File) []byte {
|
|
||||||
hdr := &moHeader{
|
|
||||||
MagicNumber: MoMagicLittleEndian,
|
|
||||||
}
|
|
||||||
data := encodeData(hdr, f)
|
|
||||||
data = append(encodeHeader(hdr), data...)
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode data and init moHeader
|
|
||||||
func encodeData(hdr *moHeader, f *File) []byte {
|
|
||||||
msgList := []Message{f.MimeHeader.toMessage()}
|
|
||||||
for _, v := range f.Messages {
|
|
||||||
if len(v.MsgId) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if len(v.MsgStr) == 0 && len(v.MsgStrPlural) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
msgList = append(msgList, v)
|
|
||||||
}
|
|
||||||
sort.Sort(byMessages(msgList))
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
var msgIdPosList = make([]moStrPos, len(msgList))
|
|
||||||
var msgStrPosList = make([]moStrPos, len(msgList))
|
|
||||||
for i, v := range msgList {
|
|
||||||
// write msgid
|
|
||||||
msgId := encodeMsgId(v)
|
|
||||||
msgIdPosList[i].Addr = uint32(buf.Len() + MoHeaderSize)
|
|
||||||
msgIdPosList[i].Size = uint32(len(msgId))
|
|
||||||
buf.WriteString(msgId)
|
|
||||||
// write msgstr
|
|
||||||
msgStr := encodeMsgStr(v)
|
|
||||||
msgStrPosList[i].Addr = uint32(buf.Len() + MoHeaderSize)
|
|
||||||
msgStrPosList[i].Size = uint32(len(msgStr))
|
|
||||||
buf.WriteString(msgStr)
|
|
||||||
}
|
|
||||||
|
|
||||||
hdr.MsgIdOffset = uint32(buf.Len() + MoHeaderSize)
|
|
||||||
binary.Write(&buf, binary.LittleEndian, msgIdPosList)
|
|
||||||
hdr.MsgStrOffset = uint32(buf.Len() + MoHeaderSize)
|
|
||||||
binary.Write(&buf, binary.LittleEndian, msgStrPosList)
|
|
||||||
|
|
||||||
hdr.MsgIdCount = uint32(len(msgList))
|
|
||||||
return buf.Bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
// must called after encodeData
|
|
||||||
func encodeHeader(hdr *moHeader) []byte {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
binary.Write(&buf, binary.LittleEndian, hdr)
|
|
||||||
return buf.Bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeMsgId(v Message) string {
|
|
||||||
if v.MsgContext != "" && v.MsgIdPlural != "" {
|
|
||||||
return v.MsgContext + EotSeparator + v.MsgId + NulSeparator + v.MsgIdPlural
|
|
||||||
}
|
|
||||||
if v.MsgContext != "" && v.MsgIdPlural == "" {
|
|
||||||
return v.MsgContext + EotSeparator + v.MsgId
|
|
||||||
}
|
|
||||||
if v.MsgContext == "" && v.MsgIdPlural != "" {
|
|
||||||
return v.MsgId + NulSeparator + v.MsgIdPlural
|
|
||||||
}
|
|
||||||
return v.MsgId
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeMsgStr(v Message) string {
|
|
||||||
if v.MsgIdPlural != "" {
|
|
||||||
return strings.Join(v.MsgStrPlural, NulSeparator)
|
|
||||||
}
|
|
||||||
return v.MsgStr
|
|
||||||
}
|
|
||||||
|
|
||||||
type byMessages []Message
|
|
||||||
|
|
||||||
func (d byMessages) Len() int {
|
|
||||||
return len(d)
|
|
||||||
}
|
|
||||||
func (d byMessages) Less(i, j int) bool {
|
|
||||||
if a, b := d[i].MsgContext, d[j].MsgContext; a != b {
|
|
||||||
return a < b
|
|
||||||
}
|
|
||||||
if a, b := d[i].MsgId, d[j].MsgId; a != b {
|
|
||||||
return a < b
|
|
||||||
}
|
|
||||||
if a, b := d[i].MsgIdPlural, d[j].MsgIdPlural; a != b {
|
|
||||||
return a < b
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
func (d byMessages) Swap(i, j int) {
|
|
||||||
d[i], d[j] = d[j], d[i]
|
|
||||||
}
|
|
||||||
193
vendor/github.com/chai2010/gettext-go/gettext/mo/file.go
generated
vendored
193
vendor/github.com/chai2010/gettext-go/gettext/mo/file.go
generated
vendored
@@ -1,193 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package mo
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
MoHeaderSize = 28
|
|
||||||
MoMagicLittleEndian = 0x950412de
|
|
||||||
MoMagicBigEndian = 0xde120495
|
|
||||||
|
|
||||||
EotSeparator = "\x04" // msgctxt and msgid separator
|
|
||||||
NulSeparator = "\x00" // msgid and msgstr separator
|
|
||||||
)
|
|
||||||
|
|
||||||
// File represents an MO File.
|
|
||||||
//
|
|
||||||
// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html
|
|
||||||
type File struct {
|
|
||||||
MagicNumber uint32
|
|
||||||
MajorVersion uint16
|
|
||||||
MinorVersion uint16
|
|
||||||
MsgIdCount uint32
|
|
||||||
MsgIdOffset uint32
|
|
||||||
MsgStrOffset uint32
|
|
||||||
HashSize uint32
|
|
||||||
HashOffset uint32
|
|
||||||
MimeHeader Header
|
|
||||||
Messages []Message
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load loads a named mo file.
|
|
||||||
func Load(name string) (*File, error) {
|
|
||||||
data, err := ioutil.ReadFile(name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return LoadData(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadData loads mo file format data.
|
|
||||||
func LoadData(data []byte) (*File, error) {
|
|
||||||
r := bytes.NewReader(data)
|
|
||||||
|
|
||||||
var magicNumber uint32
|
|
||||||
if err := binary.Read(r, binary.LittleEndian, &magicNumber); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
var bo binary.ByteOrder
|
|
||||||
switch magicNumber {
|
|
||||||
case MoMagicLittleEndian:
|
|
||||||
bo = binary.LittleEndian
|
|
||||||
case MoMagicBigEndian:
|
|
||||||
bo = binary.BigEndian
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("gettext: %v", "invalid magic number")
|
|
||||||
}
|
|
||||||
|
|
||||||
var header struct {
|
|
||||||
MajorVersion uint16
|
|
||||||
MinorVersion uint16
|
|
||||||
MsgIdCount uint32
|
|
||||||
MsgIdOffset uint32
|
|
||||||
MsgStrOffset uint32
|
|
||||||
HashSize uint32
|
|
||||||
HashOffset uint32
|
|
||||||
}
|
|
||||||
if err := binary.Read(r, bo, &header); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
if v := header.MajorVersion; v != 0 && v != 1 {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", "invalid version number")
|
|
||||||
}
|
|
||||||
if v := header.MinorVersion; v != 0 && v != 1 {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", "invalid version number")
|
|
||||||
}
|
|
||||||
|
|
||||||
msgIdStart := make([]uint32, header.MsgIdCount)
|
|
||||||
msgIdLen := make([]uint32, header.MsgIdCount)
|
|
||||||
if _, err := r.Seek(int64(header.MsgIdOffset), 0); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
for i := 0; i < int(header.MsgIdCount); i++ {
|
|
||||||
if err := binary.Read(r, bo, &msgIdLen[i]); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
if err := binary.Read(r, bo, &msgIdStart[i]); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
msgStrStart := make([]int32, header.MsgIdCount)
|
|
||||||
msgStrLen := make([]int32, header.MsgIdCount)
|
|
||||||
if _, err := r.Seek(int64(header.MsgStrOffset), 0); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
for i := 0; i < int(header.MsgIdCount); i++ {
|
|
||||||
if err := binary.Read(r, bo, &msgStrLen[i]); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
if err := binary.Read(r, bo, &msgStrStart[i]); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
file := &File{
|
|
||||||
MagicNumber: magicNumber,
|
|
||||||
MajorVersion: header.MajorVersion,
|
|
||||||
MinorVersion: header.MinorVersion,
|
|
||||||
MsgIdCount: header.MsgIdCount,
|
|
||||||
MsgIdOffset: header.MsgIdOffset,
|
|
||||||
MsgStrOffset: header.MsgStrOffset,
|
|
||||||
HashSize: header.HashSize,
|
|
||||||
HashOffset: header.HashOffset,
|
|
||||||
}
|
|
||||||
for i := 0; i < int(header.MsgIdCount); i++ {
|
|
||||||
if _, err := r.Seek(int64(msgIdStart[i]), 0); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
msgIdData := make([]byte, msgIdLen[i])
|
|
||||||
if _, err := r.Read(msgIdData); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := r.Seek(int64(msgStrStart[i]), 0); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
msgStrData := make([]byte, msgStrLen[i])
|
|
||||||
if _, err := r.Read(msgStrData); err != nil {
|
|
||||||
return nil, fmt.Errorf("gettext: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(msgIdData) == 0 {
|
|
||||||
var msg = Message{
|
|
||||||
MsgId: string(msgIdData),
|
|
||||||
MsgStr: string(msgStrData),
|
|
||||||
}
|
|
||||||
file.MimeHeader.fromMessage(&msg)
|
|
||||||
} else {
|
|
||||||
var msg = Message{
|
|
||||||
MsgId: string(msgIdData),
|
|
||||||
MsgStr: string(msgStrData),
|
|
||||||
}
|
|
||||||
// Is this a context message?
|
|
||||||
if idx := strings.Index(msg.MsgId, EotSeparator); idx != -1 {
|
|
||||||
msg.MsgContext, msg.MsgId = msg.MsgId[:idx], msg.MsgId[idx+1:]
|
|
||||||
}
|
|
||||||
// Is this a plural message?
|
|
||||||
if idx := strings.Index(msg.MsgId, NulSeparator); idx != -1 {
|
|
||||||
msg.MsgId, msg.MsgIdPlural = msg.MsgId[:idx], msg.MsgId[idx+1:]
|
|
||||||
msg.MsgStrPlural = strings.Split(msg.MsgStr, NulSeparator)
|
|
||||||
msg.MsgStr = ""
|
|
||||||
}
|
|
||||||
file.Messages = append(file.Messages, msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return file, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save saves a mo file.
|
|
||||||
func (f *File) Save(name string) error {
|
|
||||||
return ioutil.WriteFile(name, f.Data(), 0666)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save returns a mo file format data.
|
|
||||||
func (f *File) Data() []byte {
|
|
||||||
return encodeFile(f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the po format file string.
|
|
||||||
func (f *File) String() string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fmt.Fprintf(&buf, "# version: %d.%d\n", f.MajorVersion, f.MinorVersion)
|
|
||||||
fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String())
|
|
||||||
fmt.Fprintf(&buf, "\n")
|
|
||||||
|
|
||||||
for k, v := range f.Messages {
|
|
||||||
fmt.Fprintf(&buf, `msgid "%v"`+"\n", k)
|
|
||||||
fmt.Fprintf(&buf, `msgstr "%s"`+"\n", v.MsgStr)
|
|
||||||
fmt.Fprintf(&buf, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
109
vendor/github.com/chai2010/gettext-go/gettext/mo/header.go
generated
vendored
109
vendor/github.com/chai2010/gettext-go/gettext/mo/header.go
generated
vendored
@@ -1,109 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package mo
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Header is the initial comments "SOME DESCRIPTIVE TITLE", "YEAR"
|
|
||||||
// and "FIRST AUTHOR <EMAIL@ADDRESS>, YEAR" ought to be replaced by sensible information.
|
|
||||||
//
|
|
||||||
// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry
|
|
||||||
type Header struct {
|
|
||||||
ProjectIdVersion string // Project-Id-Version: PACKAGE VERSION
|
|
||||||
ReportMsgidBugsTo string // Report-Msgid-Bugs-To: FIRST AUTHOR <EMAIL@ADDRESS>
|
|
||||||
POTCreationDate string // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE
|
|
||||||
PORevisionDate string // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
|
|
||||||
LastTranslator string // Last-Translator: FIRST AUTHOR <EMAIL@ADDRESS>
|
|
||||||
LanguageTeam string // Language-Team: golang-china
|
|
||||||
Language string // Language: zh_CN
|
|
||||||
MimeVersion string // MIME-Version: 1.0
|
|
||||||
ContentType string // Content-Type: text/plain; charset=UTF-8
|
|
||||||
ContentTransferEncoding string // Content-Transfer-Encoding: 8bit
|
|
||||||
PluralForms string // Plural-Forms: nplurals=2; plural=n == 1 ? 0 : 1;
|
|
||||||
XGenerator string // X-Generator: Poedit 1.5.5
|
|
||||||
UnknowFields map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Header) fromMessage(msg *Message) {
|
|
||||||
if msg.MsgId != "" || msg.MsgStr == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lines := strings.Split(msg.MsgStr, "\n")
|
|
||||||
for i := 0; i < len(lines); i++ {
|
|
||||||
idx := strings.Index(lines[i], ":")
|
|
||||||
if idx < 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
key := strings.TrimSpace(lines[i][:idx])
|
|
||||||
val := strings.TrimSpace(lines[i][idx+1:])
|
|
||||||
switch strings.ToUpper(key) {
|
|
||||||
case strings.ToUpper("Project-Id-Version"):
|
|
||||||
p.ProjectIdVersion = val
|
|
||||||
case strings.ToUpper("Report-Msgid-Bugs-To"):
|
|
||||||
p.ReportMsgidBugsTo = val
|
|
||||||
case strings.ToUpper("POT-Creation-Date"):
|
|
||||||
p.POTCreationDate = val
|
|
||||||
case strings.ToUpper("PO-Revision-Date"):
|
|
||||||
p.PORevisionDate = val
|
|
||||||
case strings.ToUpper("Last-Translator"):
|
|
||||||
p.LastTranslator = val
|
|
||||||
case strings.ToUpper("Language-Team"):
|
|
||||||
p.LanguageTeam = val
|
|
||||||
case strings.ToUpper("Language"):
|
|
||||||
p.Language = val
|
|
||||||
case strings.ToUpper("MIME-Version"):
|
|
||||||
p.MimeVersion = val
|
|
||||||
case strings.ToUpper("Content-Type"):
|
|
||||||
p.ContentType = val
|
|
||||||
case strings.ToUpper("Content-Transfer-Encoding"):
|
|
||||||
p.ContentTransferEncoding = val
|
|
||||||
case strings.ToUpper("Plural-Forms"):
|
|
||||||
p.PluralForms = val
|
|
||||||
case strings.ToUpper("X-Generator"):
|
|
||||||
p.XGenerator = val
|
|
||||||
default:
|
|
||||||
if p.UnknowFields == nil {
|
|
||||||
p.UnknowFields = make(map[string]string)
|
|
||||||
}
|
|
||||||
p.UnknowFields[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Header) toMessage() Message {
|
|
||||||
return Message{
|
|
||||||
MsgStr: p.String(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the po format header string.
|
|
||||||
func (p Header) String() string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fmt.Fprintf(&buf, `msgid ""`+"\n")
|
|
||||||
fmt.Fprintf(&buf, `msgstr ""`+"\n")
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language)
|
|
||||||
if p.MimeVersion != "" {
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding)
|
|
||||||
if p.XGenerator != "" {
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator)
|
|
||||||
}
|
|
||||||
for k, v := range p.UnknowFields {
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v)
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
39
vendor/github.com/chai2010/gettext-go/gettext/mo/message.go
generated
vendored
39
vendor/github.com/chai2010/gettext-go/gettext/mo/message.go
generated
vendored
@@ -1,39 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package mo
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A MO file is made up of many entries,
|
|
||||||
// each entry holding the relation between an original untranslated string
|
|
||||||
// and its corresponding translation.
|
|
||||||
//
|
|
||||||
// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html
|
|
||||||
type Message struct {
|
|
||||||
MsgContext string // msgctxt context
|
|
||||||
MsgId string // msgid untranslated-string
|
|
||||||
MsgIdPlural string // msgid_plural untranslated-string-plural
|
|
||||||
MsgStr string // msgstr translated-string
|
|
||||||
MsgStrPlural []string // msgstr[0] translated-string-case-0
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the po format entry string.
|
|
||||||
func (p Message) String() string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId))
|
|
||||||
if p.MsgIdPlural != "" {
|
|
||||||
fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural))
|
|
||||||
}
|
|
||||||
if p.MsgStr != "" {
|
|
||||||
fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr))
|
|
||||||
}
|
|
||||||
for i := 0; i < len(p.MsgStrPlural); i++ {
|
|
||||||
fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i]))
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
110
vendor/github.com/chai2010/gettext-go/gettext/mo/util.go
generated
vendored
110
vendor/github.com/chai2010/gettext-go/gettext/mo/util.go
generated
vendored
@@ -1,110 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package mo
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func decodePoString(text string) string {
|
|
||||||
lines := strings.Split(text, "\n")
|
|
||||||
for i := 0; i < len(lines); i++ {
|
|
||||||
left := strings.Index(lines[i], `"`)
|
|
||||||
right := strings.LastIndex(lines[i], `"`)
|
|
||||||
if left < 0 || right < 0 || left == right {
|
|
||||||
lines[i] = ""
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
line := lines[i][left+1 : right]
|
|
||||||
data := make([]byte, 0, len(line))
|
|
||||||
for i := 0; i < len(line); i++ {
|
|
||||||
if line[i] != '\\' {
|
|
||||||
data = append(data, line[i])
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if i+1 >= len(line) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
switch line[i+1] {
|
|
||||||
case 'n': // \\n -> \n
|
|
||||||
data = append(data, '\n')
|
|
||||||
i++
|
|
||||||
case 't': // \\t -> \n
|
|
||||||
data = append(data, '\t')
|
|
||||||
i++
|
|
||||||
case '\\': // \\\ -> ?
|
|
||||||
data = append(data, '\\')
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
lines[i] = string(data)
|
|
||||||
}
|
|
||||||
return strings.Join(lines, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodePoString(text string) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
lines := strings.Split(text, "\n")
|
|
||||||
for i := 0; i < len(lines); i++ {
|
|
||||||
if lines[i] == "" {
|
|
||||||
if i != len(lines)-1 {
|
|
||||||
buf.WriteString(`"\n"` + "\n")
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
buf.WriteRune('"')
|
|
||||||
for _, r := range lines[i] {
|
|
||||||
switch r {
|
|
||||||
case '\\':
|
|
||||||
buf.WriteString(`\\`)
|
|
||||||
case '"':
|
|
||||||
buf.WriteString(`\"`)
|
|
||||||
case '\n':
|
|
||||||
buf.WriteString(`\n`)
|
|
||||||
case '\t':
|
|
||||||
buf.WriteString(`\t`)
|
|
||||||
default:
|
|
||||||
buf.WriteRune(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf.WriteString(`\n"` + "\n")
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// encodeCommentPoString encodes text for use after a "#| " comment keyword
// (e.g. `#| msgid ...`). A multi-line value is emitted as an empty `""`
// first line followed by one quoted line per input line, each continuation
// prefixed with "#| " so the whole value stays inside the comment block.
// A single-line value is emitted as one bare quoted string, because the
// caller already writes the leading `#| msgid ` / `#| msgctxt `.
func encodeCommentPoString(text string) string {
	var buf bytes.Buffer
	lines := strings.Split(text, "\n")
	multiline := len(lines) > 1
	if multiline {
		buf.WriteString(`""` + "\n")
	}
	for i := 0; i < len(lines); i++ {
		// BUG FIX: the continuation prefix must only be written for
		// multi-line values. The original tested len(lines) > 0, which is
		// always true after strings.Split, so single-line values produced
		// malformed output like `#| msgid #| "..."`.
		if multiline {
			buf.WriteString("#| ")
		}
		buf.WriteRune('"')
		for _, r := range lines[i] {
			switch r {
			case '\\':
				buf.WriteString(`\\`)
			case '"':
				buf.WriteString(`\"`)
			case '\n':
				// Unreachable after splitting on '\n'; kept for safety.
				buf.WriteString(`\n`)
			case '\t':
				buf.WriteString(`\t`)
			default:
				buf.WriteRune(r)
			}
		}
		if i < len(lines)-1 {
			buf.WriteString(`\n"` + "\n")
		} else {
			buf.WriteString(`"`)
		}
	}
	return buf.String()
}
|
|
||||||
36
vendor/github.com/chai2010/gettext-go/gettext/plural/doc.go
generated
vendored
36
vendor/github.com/chai2010/gettext-go/gettext/plural/doc.go
generated
vendored
@@ -1,36 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package plural provides standard plural formulas.
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
import (
|
|
||||||
"code.google.com/p/gettext-go/gettext/plural"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
enFormula := plural.Formula("en_US")
|
|
||||||
xxFormula := plural.Formula("zh_CN")
|
|
||||||
|
|
||||||
fmt.Printf("%s: %d\n", "en", enFormula(0))
|
|
||||||
fmt.Printf("%s: %d\n", "en", enFormula(1))
|
|
||||||
fmt.Printf("%s: %d\n", "en", enFormula(2))
|
|
||||||
fmt.Printf("%s: %d\n", "??", xxFormula(0))
|
|
||||||
fmt.Printf("%s: %d\n", "??", xxFormula(1))
|
|
||||||
fmt.Printf("%s: %d\n", "??", xxFormula(2))
|
|
||||||
fmt.Printf("%s: %d\n", "??", xxFormula(9))
|
|
||||||
// Output:
|
|
||||||
// en: 0
|
|
||||||
// en: 0
|
|
||||||
// en: 1
|
|
||||||
// ??: 0
|
|
||||||
// ??: 0
|
|
||||||
// ??: 1
|
|
||||||
// ??: 8
|
|
||||||
}
|
|
||||||
|
|
||||||
See http://www.gnu.org/software/gettext/manual/html_node/Plural-forms.html
|
|
||||||
*/
|
|
||||||
package plural
|
|
||||||
181
vendor/github.com/chai2010/gettext-go/gettext/plural/formula.go
generated
vendored
181
vendor/github.com/chai2010/gettext-go/gettext/plural/formula.go
generated
vendored
@@ -1,181 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package plural
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Formula provides the language's standard plural formula.
|
|
||||||
func Formula(lang string) func(n int) int {
|
|
||||||
if idx := index(lang); idx != -1 {
|
|
||||||
return formulaTable[fmtForms(FormsTable[idx].Value)]
|
|
||||||
}
|
|
||||||
if idx := index("??"); idx != -1 {
|
|
||||||
return formulaTable[fmtForms(FormsTable[idx].Value)]
|
|
||||||
}
|
|
||||||
return func(n int) int {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func index(lang string) int {
|
|
||||||
for i := 0; i < len(FormsTable); i++ {
|
|
||||||
if strings.HasPrefix(lang, FormsTable[i].Lang) {
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// fmtForms canonicalizes a plural-forms expression so it can be used as a
// map key: surrounding whitespace is trimmed and every interior space is
// removed.
func fmtForms(forms string) string {
	return strings.Replace(strings.TrimSpace(forms), " ", "", -1)
}
|
|
||||||
|
|
||||||
var formulaTable = map[string]func(n int) int{
|
|
||||||
fmtForms("nplurals=n; plural=n-1;"): func(n int) int {
|
|
||||||
if n > 0 {
|
|
||||||
return n - 1
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=1; plural=0;"): func(n int) int {
|
|
||||||
return 0
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=2; plural=(n != 1);"): func(n int) int {
|
|
||||||
if n <= 1 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return 1
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=2; plural=(n > 1);"): func(n int) int {
|
|
||||||
if n <= 1 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return 1
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"): func(n int) int {
|
|
||||||
if n%10 == 1 && n%100 != 11 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n != 0 {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"): func(n int) int {
|
|
||||||
if n == 1 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n == 2 {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"): func(n int) int {
|
|
||||||
if n == 1 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n == 0 || (n%100 > 0 && n%100 < 20) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
|
||||||
if n%10 == 1 && n%100 != 11 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n%10 >= 2 && (n%100 < 10 || n%100 >= 20) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
|
||||||
if n%10 == 1 && n%100 != 11 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
|
||||||
if n%10 == 1 && n%100 != 11 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
|
||||||
if n%10 == 1 && n%100 != 11 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
|
||||||
if n%10 == 1 && n%100 != 11 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
|
||||||
if n%10 == 1 && n%100 != 11 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"): func(n int) int {
|
|
||||||
if n == 1 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n >= 2 && n <= 4 {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"): func(n int) int {
|
|
||||||
if n == 1 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n >= 2 && n <= 4 {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
|
||||||
if n == 1 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
return 2
|
|
||||||
},
|
|
||||||
fmtForms("nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"): func(n int) int {
|
|
||||||
if n%100 == 1 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if n%100 == 2 {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
if n%100 == 3 || n%100 == 4 {
|
|
||||||
return 2
|
|
||||||
}
|
|
||||||
return 3
|
|
||||||
},
|
|
||||||
}
|
|
||||||
55
vendor/github.com/chai2010/gettext-go/gettext/plural/table.go
generated
vendored
55
vendor/github.com/chai2010/gettext-go/gettext/plural/table.go
generated
vendored
@@ -1,55 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package plural
|
|
||||||
|
|
||||||
// FormsTable are standard hard-coded plural rules.
// The application developers and the translators need to understand them.
//
// Lang is matched by prefix (see index), so "pt_BR" must appear before
// "pt" to take effect; the "??" entry is the fallback used by Formula for
// unknown languages. Value is the formula expression used (after fmtForms
// canonicalization) as a key into formulaTable.
//
// See GNU's gettext library source code: gettext/gettext-tools/src/plural-table.c
var FormsTable = []struct {
	Lang     string
	Language string
	Value    string
}{
	{"??", "Unknown", "nplurals=1; plural=0;"},
	{"ja", "Japanese", "nplurals=1; plural=0;"},
	{"vi", "Vietnamese", "nplurals=1; plural=0;"},
	{"ko", "Korean", "nplurals=1; plural=0;"},
	{"en", "English", "nplurals=2; plural=(n != 1);"},
	{"de", "German", "nplurals=2; plural=(n != 1);"},
	{"nl", "Dutch", "nplurals=2; plural=(n != 1);"},
	{"sv", "Swedish", "nplurals=2; plural=(n != 1);"},
	{"da", "Danish", "nplurals=2; plural=(n != 1);"},
	{"no", "Norwegian", "nplurals=2; plural=(n != 1);"},
	{"nb", "Norwegian Bokmal", "nplurals=2; plural=(n != 1);"},
	{"nn", "Norwegian Nynorsk", "nplurals=2; plural=(n != 1);"},
	{"fo", "Faroese", "nplurals=2; plural=(n != 1);"},
	{"es", "Spanish", "nplurals=2; plural=(n != 1);"},
	{"pt", "Portuguese", "nplurals=2; plural=(n != 1);"},
	{"it", "Italian", "nplurals=2; plural=(n != 1);"},
	{"bg", "Bulgarian", "nplurals=2; plural=(n != 1);"},
	{"el", "Greek", "nplurals=2; plural=(n != 1);"},
	{"fi", "Finnish", "nplurals=2; plural=(n != 1);"},
	{"et", "Estonian", "nplurals=2; plural=(n != 1);"},
	{"he", "Hebrew", "nplurals=2; plural=(n != 1);"},
	{"eo", "Esperanto", "nplurals=2; plural=(n != 1);"},
	{"hu", "Hungarian", "nplurals=2; plural=(n != 1);"},
	{"tr", "Turkish", "nplurals=2; plural=(n != 1);"},
	{"pt_BR", "Brazilian", "nplurals=2; plural=(n > 1);"},
	{"fr", "French", "nplurals=2; plural=(n > 1);"},
	{"lv", "Latvian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"},
	{"ga", "Irish", "nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"},
	{"ro", "Romanian", "nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"},
	{"lt", "Lithuanian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"},
	{"ru", "Russian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
	{"uk", "Ukrainian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
	{"be", "Belarusian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
	{"sr", "Serbian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
	{"hr", "Croatian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
	{"cs", "Czech", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"},
	{"sk", "Slovak", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"},
	{"pl", "Polish", "nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
	{"sl", "Slovenian", "nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"},
}
|
|
||||||
270
vendor/github.com/chai2010/gettext-go/gettext/po/comment.go
generated
vendored
270
vendor/github.com/chai2010/gettext-go/gettext/po/comment.go
generated
vendored
@@ -1,270 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package po
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Comment represents every message's comments.
//
// ReferenceFile and ReferenceLine are parallel slices: entry i of each
// together describes one "#: file:line" source reference.
type Comment struct {
	StartLine         int      // comment start line (1-based; 0 = unknown)
	TranslatorComment string   // # translator-comments // TrimSpace
	ExtractedComment  string   // #. extracted-comments
	ReferenceFile     []string // #: src/msgcmp.c:338 src/po-lex.c:699
	ReferenceLine     []int    // #: src/msgcmp.c:338 src/po-lex.c:699
	Flags             []string // #, fuzzy,c-format,range:0..10
	PrevMsgContext    string   // #| msgctxt previous-context
	PrevMsgId         string   // #| msgid previous-untranslated-string
}
|
|
||||||
|
|
||||||
// less reports whether p should sort before q.
//
// The primary key is StartLine, used whenever either comment has one; the
// fallback compares the "#:" references, first by count and then each
// file/line pair in turn. Fully equal comments yield false.
func (p *Comment) less(q *Comment) bool {
	if p.StartLine != 0 || q.StartLine != 0 {
		return p.StartLine < q.StartLine
	}
	if a, b := len(p.ReferenceFile), len(q.ReferenceFile); a != b {
		return a < b
	}
	for i := 0; i < len(p.ReferenceFile); i++ {
		if a, b := p.ReferenceFile[i], q.ReferenceFile[i]; a != b {
			return a < b
		}
		if a, b := p.ReferenceLine[i], q.ReferenceLine[i]; a != b {
			return a < b
		}
	}
	return false
}
|
|
||||||
|
|
||||||
// readPoComment parses the run of leading '#'-comment lines of the next PO
// entry into p, resetting any previous contents first.
//
// After skipping blank lines it repeatedly dispatches to the per-kind
// readers ("# ", "#.", "#:", "#,", "#|") until the current line no longer
// starts with '#'. The deferred closure converts io.EOF into success when
// at least one line was consumed, so a comment block at end of file is not
// reported as an error.
func (p *Comment) readPoComment(r *lineReader) (err error) {
	*p = Comment{}
	if err = r.skipBlankLine(); err != nil {
		return err
	}
	defer func(oldPos int) {
		// Treat EOF as success if we made progress.
		newPos := r.currentPos()
		if newPos != oldPos && err == io.EOF {
			err = nil
		}
	}(r.currentPos())

	p.StartLine = r.currentPos() + 1 // convert 0-based index to 1-based line
	for {
		var s string
		if s, _, err = r.currentLine(); err != nil {
			return
		}
		if len(s) == 0 || s[0] != '#' {
			return
		}

		if err = p.readTranslatorComment(r); err != nil {
			return
		}
		if err = p.readExtractedComment(r); err != nil {
			return
		}
		if err = p.readReferenceComment(r); err != nil {
			return
		}
		if err = p.readFlagsComment(r); err != nil {
			return
		}
		if err = p.readPrevMsgContext(r); err != nil {
			return
		}
		if err = p.readPrevMsgId(r); err != nil {
			return
		}
	}
}
|
|
||||||
|
|
||||||
// readTranslatorComment consumes consecutive translator-comment lines
// ("# ...") and accumulates them, newline-joined and trimmed, into
// p.TranslatorComment. Lines whose second character is '.', ',', ':' or
// '|' belong to other comment kinds and are pushed back, as is the first
// non-'#' line.
func (p *Comment) readTranslatorComment(r *lineReader) (err error) {
	const prefix = "# " // .,:|
	for {
		var s string
		if s, _, err = r.readLine(); err != nil {
			return err
		}
		if len(s) < 1 || s[0] != '#' {
			r.unreadLine()
			return nil
		}
		// Leave other comment kinds for their dedicated readers.
		if len(s) >= 2 {
			switch s[1] {
			case '.', ',', ':', '|':
				r.unreadLine()
				return nil
			}
		}
		if p.TranslatorComment != "" {
			p.TranslatorComment += "\n"
		}
		p.TranslatorComment += strings.TrimSpace(s[1:])
	}
}
|
|
||||||
|
|
||||||
func (p *Comment) readExtractedComment(r *lineReader) (err error) {
|
|
||||||
const prefix = "#."
|
|
||||||
for {
|
|
||||||
var s string
|
|
||||||
if s, _, err = r.readLine(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(s) < len(prefix) || s[:len(prefix)] != prefix {
|
|
||||||
r.unreadLine()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if p.ExtractedComment != "" {
|
|
||||||
p.ExtractedComment += "\n"
|
|
||||||
}
|
|
||||||
p.ExtractedComment += strings.TrimSpace(s[len(prefix):])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// readReferenceComment consumes consecutive reference lines
// ("#: file:line file:line ...") and appends each parsed reference to the
// parallel slices p.ReferenceFile / p.ReferenceLine. Tokens without a ':'
// separator (or starting with one) are skipped; a malformed line number
// silently parses as 0 because the strconv.Atoi error is ignored. The
// first non-matching line is pushed back.
func (p *Comment) readReferenceComment(r *lineReader) (err error) {
	const prefix = "#:"
	for {
		var s string
		if s, _, err = r.readLine(); err != nil {
			return err
		}
		if len(s) < len(prefix) || s[:len(prefix)] != prefix {
			r.unreadLine()
			return nil
		}
		ss := strings.Split(strings.TrimSpace(s[len(prefix):]), " ")
		for i := 0; i < len(ss); i++ {
			idx := strings.Index(ss[i], ":")
			if idx <= 0 {
				continue
			}
			name := strings.TrimSpace(ss[i][:idx])
			line, _ := strconv.Atoi(strings.TrimSpace(ss[i][idx+1:])) // best effort: bad numbers become 0
			p.ReferenceFile = append(p.ReferenceFile, name)
			p.ReferenceLine = append(p.ReferenceLine, line)
		}
	}
}
|
|
||||||
|
|
||||||
func (p *Comment) readFlagsComment(r *lineReader) (err error) {
|
|
||||||
const prefix = "#,"
|
|
||||||
for {
|
|
||||||
var s string
|
|
||||||
if s, _, err = r.readLine(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(s) < len(prefix) || s[:len(prefix)] != prefix {
|
|
||||||
r.unreadLine()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
ss := strings.Split(strings.TrimSpace(s[len(prefix):]), ",")
|
|
||||||
for i := 0; i < len(ss); i++ {
|
|
||||||
p.Flags = append(p.Flags, strings.TrimSpace(ss[i]))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// readPrevMsgContext parses an optional `#| msgctxt ...` block into
// p.PrevMsgContext. If the current line does not match
// rePrevMsgContextComments (a regexp declared elsewhere in this package —
// presumably matching `#| msgctxt`; not visible here), the reader is left
// untouched and nil is returned.
func (p *Comment) readPrevMsgContext(r *lineReader) (err error) {
	var s string
	if s, _, err = r.currentLine(); err != nil {
		return
	}
	if !rePrevMsgContextComments.MatchString(s) {
		return
	}
	p.PrevMsgContext, err = p.readString(r)
	return
}
|
|
||||||
|
|
||||||
// readPrevMsgId parses an optional `#| msgid ...` block into p.PrevMsgId.
// If the current line does not match rePrevMsgIdComments (a regexp
// declared elsewhere in this package — presumably matching `#| msgid`;
// not visible here), the reader is left untouched and nil is returned.
func (p *Comment) readPrevMsgId(r *lineReader) (err error) {
	var s string
	if s, _, err = r.currentLine(); err != nil {
		return
	}
	if !rePrevMsgIdComments.MatchString(s) {
		return
	}
	p.PrevMsgId, err = p.readString(r)
	return
}
|
|
||||||
|
|
||||||
// readString reads a quoted comment string block: the current line plus
// any following continuation lines matched by reStringLineComments (a
// regexp declared elsewhere in this package; not visible here), decoding
// each payload with decodePoString and concatenating the results. The
// first non-matching line is pushed back. A read error after the first
// line is returned together with the text accumulated so far.
func (p *Comment) readString(r *lineReader) (msg string, err error) {
	var s string
	if s, _, err = r.readLine(); err != nil {
		return
	}
	msg += decodePoString(s)
	for {
		if s, _, err = r.readLine(); err != nil {
			return
		}
		if !reStringLineComments.MatchString(s) {
			r.unreadLine()
			break
		}
		msg += decodePoString(s)
	}
	return
}
|
|
||||||
|
|
||||||
// GetFuzzy gets the fuzzy flag.
|
|
||||||
func (p *Comment) GetFuzzy() bool {
|
|
||||||
for _, s := range p.Flags {
|
|
||||||
if s == "fuzzy" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetFuzzy sets the fuzzy flag.
//
// NOTE(review): this is an unimplemented stub in the vendored code — the
// fuzzy argument is ignored and p.Flags is never modified.
func (p *Comment) SetFuzzy(fuzzy bool) {
	//
}
|
|
||||||
|
|
||||||
// String returns the po format comment string.
//
// Sections are emitted in canonical PO order: translator comments ("# "),
// extracted comments ("#."), references ("#:"), flags ("#,"), then the
// previous msgctxt/msgid ("#|"). Empty sections are omitted; references
// are only written when the file and line slices have equal, non-zero
// length.
func (p Comment) String() string {
	var buf bytes.Buffer
	if p.TranslatorComment != "" {
		ss := strings.Split(p.TranslatorComment, "\n")
		for i := 0; i < len(ss); i++ {
			fmt.Fprintf(&buf, "# %s\n", ss[i])
		}
	}
	if p.ExtractedComment != "" {
		ss := strings.Split(p.ExtractedComment, "\n")
		for i := 0; i < len(ss); i++ {
			fmt.Fprintf(&buf, "#. %s\n", ss[i])
		}
	}
	if a, b := len(p.ReferenceFile), len(p.ReferenceLine); a != 0 && a == b {
		fmt.Fprintf(&buf, "#:")
		for i := 0; i < len(p.ReferenceFile); i++ {
			fmt.Fprintf(&buf, " %s:%d", p.ReferenceFile[i], p.ReferenceLine[i])
		}
		fmt.Fprintf(&buf, "\n")
	}
	if len(p.Flags) != 0 {
		// First flag follows "#, "; the rest are comma-separated.
		fmt.Fprintf(&buf, "#, %s", p.Flags[0])
		for i := 1; i < len(p.Flags); i++ {
			fmt.Fprintf(&buf, ", %s", p.Flags[i])
		}
		fmt.Fprintf(&buf, "\n")
	}
	if p.PrevMsgContext != "" {
		s := encodeCommentPoString(p.PrevMsgContext)
		fmt.Fprintf(&buf, "#| msgctxt %s\n", s)
	}
	if p.PrevMsgId != "" {
		s := encodeCommentPoString(p.PrevMsgId)
		fmt.Fprintf(&buf, "#| msgid %s\n", s)
	}
	return buf.String()
}
|
|
||||||
24
vendor/github.com/chai2010/gettext-go/gettext/po/doc.go
generated
vendored
24
vendor/github.com/chai2010/gettext-go/gettext/po/doc.go
generated
vendored
@@ -1,24 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package po provides support for reading and writing GNU PO file.
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
import (
|
|
||||||
"github.com/chai2010/gettext-go/gettext/po"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
poFile, err := po.Load("test.po")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
fmt.Printf("%v", poFile)
|
|
||||||
}
|
|
||||||
|
|
||||||
The GNU PO file specification is at
|
|
||||||
http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html.
|
|
||||||
*/
|
|
||||||
package po
|
|
||||||
75
vendor/github.com/chai2010/gettext-go/gettext/po/file.go
generated
vendored
75
vendor/github.com/chai2010/gettext-go/gettext/po/file.go
generated
vendored
@@ -1,75 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package po
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
// File represents an PO File.
//
// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html
type File struct {
	MimeHeader Header    // fields of the header entry (the msgid "" message)
	Messages   []Message // all non-header entries, in file order
}
|
|
||||||
|
|
||||||
// Load loads a named po file.
// It reads the whole file into memory and delegates parsing to LoadData.
func Load(name string) (*File, error) {
	data, err := ioutil.ReadFile(name)
	if err != nil {
		return nil, err
	}
	return LoadData(data)
}
|
|
||||||
|
|
||||||
// LoadData loads po file format data.
//
// Entries are read sequentially until io.EOF, which signals successful
// completion; any other parse error aborts the load. The header entry
// (empty MsgId) is folded into MimeHeader instead of being appended to
// Messages.
func LoadData(data []byte) (*File, error) {
	r := newLineReader(string(data))
	var file File
	for {
		var msg Message
		if err := msg.readPoEntry(r); err != nil {
			if err == io.EOF {
				return &file, nil
			}
			return nil, err
		}
		if msg.MsgId == "" {
			file.MimeHeader.parseHeader(&msg)
			continue
		}
		file.Messages = append(file.Messages, msg)
	}
}
|
|
||||||
|
|
||||||
// Save writes the po file to name with permission 0666 (before umask).
func (f *File) Save(name string) error {
	return ioutil.WriteFile(name, []byte(f.String()), 0666)
}

// Data returns the po file format data.
// Messages are serialized from a sorted copy (ordered by byMessages, i.e.
// by comment references and msg fields) so output is deterministic;
// f.Messages itself is not reordered.
func (f *File) Data() []byte {
	// sort the messages by the ReferenceFile/ReferenceLine fields
	var messages []Message
	messages = append(messages, f.Messages...)
	sort.Sort(byMessages(messages))

	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String())
	for i := 0; i < len(messages); i++ {
		fmt.Fprintf(&buf, "%s\n", messages[i].String())
	}
	return buf.Bytes()
}

// String returns the po format file string.
func (f *File) String() string {
	return string(f.Data())
}
|
|
||||||
106
vendor/github.com/chai2010/gettext-go/gettext/po/header.go
generated
vendored
106
vendor/github.com/chai2010/gettext-go/gettext/po/header.go
generated
vendored
@@ -1,106 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package po
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Header is the initial comments "SOME DESCRIPTIVE TITLE", "YEAR"
// and "FIRST AUTHOR <EMAIL@ADDRESS>, YEAR" ought to be replaced by sensible information.
//
// UnknowFields (name kept for API compatibility) collects any header line
// whose key is not one of the well-known fields below.
//
// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry
type Header struct {
	Comment                          // Header Comments
	ProjectIdVersion        string   // Project-Id-Version: PACKAGE VERSION
	ReportMsgidBugsTo       string   // Report-Msgid-Bugs-To: FIRST AUTHOR <EMAIL@ADDRESS>
	POTCreationDate         string   // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE
	PORevisionDate          string   // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
	LastTranslator          string   // Last-Translator: FIRST AUTHOR <EMAIL@ADDRESS>
	LanguageTeam            string   // Language-Team: golang-china
	Language                string   // Language: zh_CN
	MimeVersion             string   // MIME-Version: 1.0
	ContentType             string   // Content-Type: text/plain; charset=UTF-8
	ContentTransferEncoding string   // Content-Transfer-Encoding: 8bit
	PluralForms             string   // Plural-Forms: nplurals=2; plural=n == 1 ? 0 : 1;
	XGenerator              string   // X-Generator: Poedit 1.5.5
	UnknowFields            map[string]string
}
|
|
||||||
|
|
||||||
// parseHeader extracts the MIME header fields from the PO header entry
// (the message whose msgid is empty). Messages with a non-empty MsgId or
// an empty MsgStr are ignored. Each "Key: Value" line of MsgStr is matched
// case-insensitively against the well-known header names; anything else is
// collected into UnknowFields. The entry's comment block is carried over
// onto the header.
func (p *Header) parseHeader(msg *Message) {
	if msg.MsgId != "" || msg.MsgStr == "" {
		return
	}
	lines := strings.Split(msg.MsgStr, "\n")
	for i := 0; i < len(lines); i++ {
		idx := strings.Index(lines[i], ":")
		if idx < 0 {
			continue
		}
		key := strings.TrimSpace(lines[i][:idx])
		val := strings.TrimSpace(lines[i][idx+1:])
		// Case-insensitive match: both sides are uppercased.
		switch strings.ToUpper(key) {
		case strings.ToUpper("Project-Id-Version"):
			p.ProjectIdVersion = val
		case strings.ToUpper("Report-Msgid-Bugs-To"):
			p.ReportMsgidBugsTo = val
		case strings.ToUpper("POT-Creation-Date"):
			p.POTCreationDate = val
		case strings.ToUpper("PO-Revision-Date"):
			p.PORevisionDate = val
		case strings.ToUpper("Last-Translator"):
			p.LastTranslator = val
		case strings.ToUpper("Language-Team"):
			p.LanguageTeam = val
		case strings.ToUpper("Language"):
			p.Language = val
		case strings.ToUpper("MIME-Version"):
			p.MimeVersion = val
		case strings.ToUpper("Content-Type"):
			p.ContentType = val
		case strings.ToUpper("Content-Transfer-Encoding"):
			p.ContentTransferEncoding = val
		case strings.ToUpper("Plural-Forms"):
			p.PluralForms = val
		case strings.ToUpper("X-Generator"):
			p.XGenerator = val
		default:
			if p.UnknowFields == nil {
				p.UnknowFields = make(map[string]string)
			}
			p.UnknowFields[key] = val
		}
	}
	p.Comment = msg.Comment
}
|
|
||||||
|
|
||||||
// String returns the po format header string.
|
|
||||||
func (p Header) String() string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fmt.Fprintf(&buf, "%s", p.Comment.String())
|
|
||||||
fmt.Fprintf(&buf, `msgid ""`+"\n")
|
|
||||||
fmt.Fprintf(&buf, `msgstr ""`+"\n")
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language)
|
|
||||||
if p.MimeVersion != "" {
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType)
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding)
|
|
||||||
if p.XGenerator != "" {
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator)
|
|
||||||
}
|
|
||||||
for k, v := range p.UnknowFields {
|
|
||||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v)
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
62
vendor/github.com/chai2010/gettext-go/gettext/po/line_reader.go
generated
vendored
62
vendor/github.com/chai2010/gettext-go/gettext/po/line_reader.go
generated
vendored
@@ -1,62 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package po
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// lineReader provides rewindable line-at-a-time access over an in-memory
// PO file.
type lineReader struct {
	lines []string // file content split on '\n', with every '\r' removed
	pos   int      // index of the current (next unread) line
}
|
|
||||||
|
|
||||||
func newLineReader(data string) *lineReader {
|
|
||||||
data = strings.Replace(data, "\r", "", -1)
|
|
||||||
lines := strings.Split(data, "\n")
|
|
||||||
return &lineReader{lines: lines}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *lineReader) skipBlankLine() error {
|
|
||||||
for ; r.pos < len(r.lines); r.pos++ {
|
|
||||||
if strings.TrimSpace(r.lines[r.pos]) != "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if r.pos >= len(r.lines) {
|
|
||||||
return io.EOF
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// currentPos returns the zero-based index of the current line.
func (r *lineReader) currentPos() int {
	return r.pos
}
|
|
||||||
|
|
||||||
func (r *lineReader) currentLine() (s string, pos int, err error) {
|
|
||||||
if r.pos >= len(r.lines) {
|
|
||||||
err = io.EOF
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s, pos = r.lines[r.pos], r.pos
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *lineReader) readLine() (s string, pos int, err error) {
|
|
||||||
if r.pos >= len(r.lines) {
|
|
||||||
err = io.EOF
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s, pos = r.lines[r.pos], r.pos
|
|
||||||
r.pos++
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *lineReader) unreadLine() {
|
|
||||||
if r.pos >= 0 {
|
|
||||||
r.pos--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
189
vendor/github.com/chai2010/gettext-go/gettext/po/message.go
generated
vendored
189
vendor/github.com/chai2010/gettext-go/gettext/po/message.go
generated
vendored
@@ -1,189 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package po
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A PO file is made up of many entries,
|
|
||||||
// each entry holding the relation between an original untranslated string
|
|
||||||
// and its corresponding translation.
|
|
||||||
//
|
|
||||||
// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html
|
|
||||||
type Message struct {
|
|
||||||
Comment // Coments
|
|
||||||
MsgContext string // msgctxt context
|
|
||||||
MsgId string // msgid untranslated-string
|
|
||||||
MsgIdPlural string // msgid_plural untranslated-string-plural
|
|
||||||
MsgStr string // msgstr translated-string
|
|
||||||
MsgStrPlural []string // msgstr[0] translated-string-case-0
|
|
||||||
}
|
|
||||||
|
|
||||||
type byMessages []Message
|
|
||||||
|
|
||||||
func (d byMessages) Len() int {
|
|
||||||
return len(d)
|
|
||||||
}
|
|
||||||
func (d byMessages) Less(i, j int) bool {
|
|
||||||
if d[i].Comment.less(&d[j].Comment) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if a, b := d[i].MsgContext, d[j].MsgContext; a != b {
|
|
||||||
return a < b
|
|
||||||
}
|
|
||||||
if a, b := d[i].MsgId, d[j].MsgId; a != b {
|
|
||||||
return a < b
|
|
||||||
}
|
|
||||||
if a, b := d[i].MsgIdPlural, d[j].MsgIdPlural; a != b {
|
|
||||||
return a < b
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
func (d byMessages) Swap(i, j int) {
|
|
||||||
d[i], d[j] = d[j], d[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Message) readPoEntry(r *lineReader) (err error) {
|
|
||||||
*p = Message{}
|
|
||||||
if err = r.skipBlankLine(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer func(oldPos int) {
|
|
||||||
newPos := r.currentPos()
|
|
||||||
if newPos != oldPos && err == io.EOF {
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
}(r.currentPos())
|
|
||||||
|
|
||||||
if err = p.Comment.readPoComment(r); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
var s string
|
|
||||||
if s, _, err = r.currentLine(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.isInvalidLine(s) {
|
|
||||||
err = fmt.Errorf("gettext: line %d, %v", r.currentPos(), "invalid line")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if reComment.MatchString(s) || reBlankLine.MatchString(s) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = p.readMsgContext(r); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err = p.readMsgId(r); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err = p.readMsgIdPlural(r); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err = p.readMsgStrOrPlural(r); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Message) readMsgContext(r *lineReader) (err error) {
|
|
||||||
var s string
|
|
||||||
if s, _, err = r.currentLine(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !reMsgContext.MatchString(s) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.MsgContext, err = p.readString(r)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Message) readMsgId(r *lineReader) (err error) {
|
|
||||||
var s string
|
|
||||||
if s, _, err = r.currentLine(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !reMsgId.MatchString(s) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.MsgId, err = p.readString(r)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Message) readMsgIdPlural(r *lineReader) (err error) {
|
|
||||||
var s string
|
|
||||||
if s, _, err = r.currentLine(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !reMsgIdPlural.MatchString(s) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.MsgIdPlural, err = p.readString(r)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Message) readMsgStrOrPlural(r *lineReader) (err error) {
|
|
||||||
var s string
|
|
||||||
if s, _, err = r.currentLine(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !reMsgStr.MatchString(s) && !reMsgStrPlural.MatchString(s) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if reMsgStrPlural.MatchString(s) {
|
|
||||||
left, right := strings.Index(s, `[`), strings.LastIndex(s, `]`)
|
|
||||||
idx, _ := strconv.Atoi(s[left+1 : right])
|
|
||||||
s, err = p.readString(r)
|
|
||||||
if n := len(p.MsgStrPlural); (idx + 1) > n {
|
|
||||||
p.MsgStrPlural = append(p.MsgStrPlural, make([]string, (idx+1)-n)...)
|
|
||||||
}
|
|
||||||
p.MsgStrPlural[idx] = s
|
|
||||||
} else {
|
|
||||||
p.MsgStr, err = p.readString(r)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Message) readString(r *lineReader) (msg string, err error) {
|
|
||||||
var s string
|
|
||||||
if s, _, err = r.readLine(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
msg += decodePoString(s)
|
|
||||||
for {
|
|
||||||
if s, _, err = r.readLine(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !reStringLine.MatchString(s) {
|
|
||||||
r.unreadLine()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
msg += decodePoString(s)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the po format entry string.
|
|
||||||
func (p Message) String() string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fmt.Fprintf(&buf, "%s", p.Comment.String())
|
|
||||||
fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId))
|
|
||||||
if p.MsgIdPlural != "" {
|
|
||||||
fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural))
|
|
||||||
}
|
|
||||||
if p.MsgStr != "" {
|
|
||||||
fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr))
|
|
||||||
}
|
|
||||||
for i := 0; i < len(p.MsgStrPlural); i++ {
|
|
||||||
fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i]))
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
58
vendor/github.com/chai2010/gettext-go/gettext/po/re.go
generated
vendored
58
vendor/github.com/chai2010/gettext-go/gettext/po/re.go
generated
vendored
@@ -1,58 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package po
|
|
||||||
|
|
||||||
import (
|
|
||||||
"regexp"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
reComment = regexp.MustCompile(`^#`) // #
|
|
||||||
reExtractedComments = regexp.MustCompile(`^#\.`) // #.
|
|
||||||
reReferenceComments = regexp.MustCompile(`^#:`) // #:
|
|
||||||
reFlagsComments = regexp.MustCompile(`^#,`) // #, fuzzy,c-format
|
|
||||||
rePrevMsgContextComments = regexp.MustCompile(`^#\|\s+msgctxt`) // #| msgctxt
|
|
||||||
rePrevMsgIdComments = regexp.MustCompile(`^#\|\s+msgid`) // #| msgid
|
|
||||||
reStringLineComments = regexp.MustCompile(`^#\|\s+".*"\s*$`) // #| "message"
|
|
||||||
|
|
||||||
reMsgContext = regexp.MustCompile(`^msgctxt\s+".*"\s*$`) // msgctxt
|
|
||||||
reMsgId = regexp.MustCompile(`^msgid\s+".*"\s*$`) // msgid
|
|
||||||
reMsgIdPlural = regexp.MustCompile(`^msgid_plural\s+".*"\s*$`) // msgid_plural
|
|
||||||
reMsgStr = regexp.MustCompile(`^msgstr\s*".*"\s*$`) // msgstr
|
|
||||||
reMsgStrPlural = regexp.MustCompile(`^msgstr\s*(\[\d+\])\s*".*"\s*$`) // msgstr[0]
|
|
||||||
reStringLine = regexp.MustCompile(`^\s*".*"\s*$`) // "message"
|
|
||||||
reBlankLine = regexp.MustCompile(`^\s*$`) //
|
|
||||||
)
|
|
||||||
|
|
||||||
func (p *Message) isInvalidLine(s string) bool {
|
|
||||||
if reComment.MatchString(s) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if reBlankLine.MatchString(s) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if reMsgContext.MatchString(s) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if reMsgId.MatchString(s) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if reMsgIdPlural.MatchString(s) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if reMsgStr.MatchString(s) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if reMsgStrPlural.MatchString(s) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if reStringLine.MatchString(s) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
110
vendor/github.com/chai2010/gettext-go/gettext/po/util.go
generated
vendored
110
vendor/github.com/chai2010/gettext-go/gettext/po/util.go
generated
vendored
@@ -1,110 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package po
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func decodePoString(text string) string {
|
|
||||||
lines := strings.Split(text, "\n")
|
|
||||||
for i := 0; i < len(lines); i++ {
|
|
||||||
left := strings.Index(lines[i], `"`)
|
|
||||||
right := strings.LastIndex(lines[i], `"`)
|
|
||||||
if left < 0 || right < 0 || left == right {
|
|
||||||
lines[i] = ""
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
line := lines[i][left+1 : right]
|
|
||||||
data := make([]byte, 0, len(line))
|
|
||||||
for i := 0; i < len(line); i++ {
|
|
||||||
if line[i] != '\\' {
|
|
||||||
data = append(data, line[i])
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if i+1 >= len(line) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
switch line[i+1] {
|
|
||||||
case 'n': // \\n -> \n
|
|
||||||
data = append(data, '\n')
|
|
||||||
i++
|
|
||||||
case 't': // \\t -> \n
|
|
||||||
data = append(data, '\t')
|
|
||||||
i++
|
|
||||||
case '\\': // \\\ -> ?
|
|
||||||
data = append(data, '\\')
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
lines[i] = string(data)
|
|
||||||
}
|
|
||||||
return strings.Join(lines, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodePoString(text string) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
lines := strings.Split(text, "\n")
|
|
||||||
for i := 0; i < len(lines); i++ {
|
|
||||||
if lines[i] == "" {
|
|
||||||
if i != len(lines)-1 {
|
|
||||||
buf.WriteString(`"\n"` + "\n")
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
buf.WriteRune('"')
|
|
||||||
for _, r := range lines[i] {
|
|
||||||
switch r {
|
|
||||||
case '\\':
|
|
||||||
buf.WriteString(`\\`)
|
|
||||||
case '"':
|
|
||||||
buf.WriteString(`\"`)
|
|
||||||
case '\n':
|
|
||||||
buf.WriteString(`\n`)
|
|
||||||
case '\t':
|
|
||||||
buf.WriteString(`\t`)
|
|
||||||
default:
|
|
||||||
buf.WriteRune(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf.WriteString(`\n"` + "\n")
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeCommentPoString(text string) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
lines := strings.Split(text, "\n")
|
|
||||||
if len(lines) > 1 {
|
|
||||||
buf.WriteString(`""` + "\n")
|
|
||||||
}
|
|
||||||
for i := 0; i < len(lines); i++ {
|
|
||||||
if len(lines) > 0 {
|
|
||||||
buf.WriteString("#| ")
|
|
||||||
}
|
|
||||||
buf.WriteRune('"')
|
|
||||||
for _, r := range lines[i] {
|
|
||||||
switch r {
|
|
||||||
case '\\':
|
|
||||||
buf.WriteString(`\\`)
|
|
||||||
case '"':
|
|
||||||
buf.WriteString(`\"`)
|
|
||||||
case '\n':
|
|
||||||
buf.WriteString(`\n`)
|
|
||||||
case '\t':
|
|
||||||
buf.WriteString(`\t`)
|
|
||||||
default:
|
|
||||||
buf.WriteRune(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if i < len(lines)-1 {
|
|
||||||
buf.WriteString(`\n"` + "\n")
|
|
||||||
} else {
|
|
||||||
buf.WriteString(`"`)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
128
vendor/github.com/chai2010/gettext-go/gettext/tr.go
generated
vendored
128
vendor/github.com/chai2010/gettext-go/gettext/tr.go
generated
vendored
@@ -1,128 +0,0 @@
|
|||||||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package gettext
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/chai2010/gettext-go/gettext/mo"
|
|
||||||
"github.com/chai2010/gettext-go/gettext/plural"
|
|
||||||
"github.com/chai2010/gettext-go/gettext/po"
|
|
||||||
)
|
|
||||||
|
|
||||||
var nilTranslator = &translator{
|
|
||||||
MessageMap: make(map[string]mo.Message),
|
|
||||||
PluralFormula: plural.Formula("??"),
|
|
||||||
}
|
|
||||||
|
|
||||||
type translator struct {
|
|
||||||
MessageMap map[string]mo.Message
|
|
||||||
PluralFormula func(n int) int
|
|
||||||
}
|
|
||||||
|
|
||||||
func newMoTranslator(name string, data []byte) (*translator, error) {
|
|
||||||
var (
|
|
||||||
f *mo.File
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
if len(data) != 0 {
|
|
||||||
f, err = mo.LoadData(data)
|
|
||||||
} else {
|
|
||||||
f, err = mo.Load(name)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var tr = &translator{
|
|
||||||
MessageMap: make(map[string]mo.Message),
|
|
||||||
}
|
|
||||||
for _, v := range f.Messages {
|
|
||||||
tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = v
|
|
||||||
}
|
|
||||||
if lang := f.MimeHeader.Language; lang != "" {
|
|
||||||
tr.PluralFormula = plural.Formula(lang)
|
|
||||||
} else {
|
|
||||||
tr.PluralFormula = plural.Formula("??")
|
|
||||||
}
|
|
||||||
return tr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newPoTranslator(name string, data []byte) (*translator, error) {
|
|
||||||
var (
|
|
||||||
f *po.File
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
if len(data) != 0 {
|
|
||||||
f, err = po.LoadData(data)
|
|
||||||
} else {
|
|
||||||
f, err = po.Load(name)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var tr = &translator{
|
|
||||||
MessageMap: make(map[string]mo.Message),
|
|
||||||
}
|
|
||||||
for _, v := range f.Messages {
|
|
||||||
tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = mo.Message{
|
|
||||||
MsgContext: v.MsgContext,
|
|
||||||
MsgId: v.MsgId,
|
|
||||||
MsgIdPlural: v.MsgIdPlural,
|
|
||||||
MsgStr: v.MsgStr,
|
|
||||||
MsgStrPlural: v.MsgStrPlural,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if lang := f.MimeHeader.Language; lang != "" {
|
|
||||||
tr.PluralFormula = plural.Formula(lang)
|
|
||||||
} else {
|
|
||||||
tr.PluralFormula = plural.Formula("??")
|
|
||||||
}
|
|
||||||
return tr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *translator) PGettext(msgctxt, msgid string) string {
|
|
||||||
return p.PNGettext(msgctxt, msgid, "", 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *translator) PNGettext(msgctxt, msgid, msgidPlural string, n int) string {
|
|
||||||
n = p.PluralFormula(n)
|
|
||||||
if ss := p.findMsgStrPlural(msgctxt, msgid, msgidPlural); len(ss) != 0 {
|
|
||||||
if n >= len(ss) {
|
|
||||||
n = len(ss) - 1
|
|
||||||
}
|
|
||||||
if ss[n] != "" {
|
|
||||||
return ss[n]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if msgidPlural != "" && n > 0 {
|
|
||||||
return msgidPlural
|
|
||||||
}
|
|
||||||
return msgid
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *translator) findMsgStrPlural(msgctxt, msgid, msgidPlural string) []string {
|
|
||||||
key := p.makeMapKey(msgctxt, msgid)
|
|
||||||
if v, ok := p.MessageMap[key]; ok {
|
|
||||||
if len(v.MsgIdPlural) != 0 {
|
|
||||||
if len(v.MsgStrPlural) != 0 {
|
|
||||||
return v.MsgStrPlural
|
|
||||||
} else {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if len(v.MsgStr) != 0 {
|
|
||||||
return []string{v.MsgStr}
|
|
||||||
} else {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *translator) makeMapKey(msgctxt, msgid string) string {
|
|
||||||
if msgctxt != "" {
|
|
||||||
return msgctxt + mo.EotSeparator + msgid
|
|
||||||
}
|
|
||||||
return msgid
|
|
||||||
}
|
|
||||||
24
vendor/github.com/exponent-io/jsonpath/.gitignore
generated
vendored
24
vendor/github.com/exponent-io/jsonpath/.gitignore
generated
vendored
@@ -1,24 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
*.test
|
|
||||||
*.prof
|
|
||||||
5
vendor/github.com/exponent-io/jsonpath/.travis.yml
generated
vendored
5
vendor/github.com/exponent-io/jsonpath/.travis.yml
generated
vendored
@@ -1,5 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.5
|
|
||||||
- tip
|
|
||||||
21
vendor/github.com/exponent-io/jsonpath/LICENSE
generated
vendored
21
vendor/github.com/exponent-io/jsonpath/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
|
|||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2015 Exponent Labs LLC
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
||||||
66
vendor/github.com/exponent-io/jsonpath/README.md
generated
vendored
66
vendor/github.com/exponent-io/jsonpath/README.md
generated
vendored
@@ -1,66 +0,0 @@
|
|||||||
[](https://godoc.org/github.com/exponent-io/jsonpath)
|
|
||||||
[](https://travis-ci.org/exponent-io/jsonpath)
|
|
||||||
|
|
||||||
# jsonpath
|
|
||||||
|
|
||||||
This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder places where a json.Decoder would have been used.
|
|
||||||
|
|
||||||
This Decoder has the following enhancements...
|
|
||||||
* The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
|
|
||||||
* The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path.
|
|
||||||
* The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token.
|
|
||||||
* The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string.
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
go get -u github.com/exponent-io/jsonpath
|
|
||||||
|
|
||||||
## Example Usage
|
|
||||||
|
|
||||||
#### SeekTo
|
|
||||||
|
|
||||||
```go
|
|
||||||
import "github.com/exponent-io/jsonpath"
|
|
||||||
|
|
||||||
var j = []byte(`[
|
|
||||||
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
|
|
||||||
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
|
|
||||||
]`)
|
|
||||||
|
|
||||||
w := json.NewDecoder(bytes.NewReader(j))
|
|
||||||
var v interface{}
|
|
||||||
|
|
||||||
w.SeekTo(1, "Point", "G")
|
|
||||||
w.Decode(&v) // v is 218
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Scan with PathActions
|
|
||||||
|
|
||||||
```go
|
|
||||||
var j = []byte(`{"colors":[
|
|
||||||
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
|
|
||||||
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
|
|
||||||
]}`)
|
|
||||||
|
|
||||||
var actions PathActions
|
|
||||||
|
|
||||||
// Extract the value at Point.A
|
|
||||||
actions.Add(func(d *Decoder) error {
|
|
||||||
var alpha int
|
|
||||||
err := d.Decode(&alpha)
|
|
||||||
fmt.Printf("Alpha: %v\n", alpha)
|
|
||||||
return err
|
|
||||||
}, "Point", "A")
|
|
||||||
|
|
||||||
w := NewDecoder(bytes.NewReader(j))
|
|
||||||
w.SeekTo("colors", 0)
|
|
||||||
|
|
||||||
var ok = true
|
|
||||||
var err error
|
|
||||||
for ok {
|
|
||||||
ok, err = w.Scan(&actions)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
210
vendor/github.com/exponent-io/jsonpath/decoder.go
generated
vendored
210
vendor/github.com/exponent-io/jsonpath/decoder.go
generated
vendored
@@ -1,210 +0,0 @@
|
|||||||
package jsonpath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// KeyString is returned from Decoder.Token to represent each key in a JSON object value.
|
|
||||||
type KeyString string
|
|
||||||
|
|
||||||
// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens.
|
|
||||||
type Decoder struct {
|
|
||||||
json.Decoder
|
|
||||||
|
|
||||||
path JsonPath
|
|
||||||
context jsonContext
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDecoder creates a new instance of the extended JSON Decoder.
|
|
||||||
func NewDecoder(r io.Reader) *Decoder {
|
|
||||||
return &Decoder{Decoder: *json.NewDecoder(r)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SeekTo causes the Decoder to move forward to a given path in the JSON structure.
|
|
||||||
//
|
|
||||||
// The path argument must consist of strings or integers. Each string specifies an JSON object key, and
|
|
||||||
// each integer specifies an index into a JSON array.
|
|
||||||
//
|
|
||||||
// Consider the JSON structure
|
|
||||||
//
|
|
||||||
// { "a": [0,"s",12e4,{"b":0,"v":35} ] }
|
|
||||||
//
|
|
||||||
// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object,
|
|
||||||
// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v".
|
|
||||||
// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35.
|
|
||||||
//
|
|
||||||
// SeekTo returns a boolean value indicating whether a match was found.
|
|
||||||
//
|
|
||||||
// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
|
|
||||||
func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {
|
|
||||||
|
|
||||||
if len(path) == 0 {
|
|
||||||
return len(d.path) == 0, nil
|
|
||||||
}
|
|
||||||
last := len(path) - 1
|
|
||||||
if i, ok := path[last].(int); ok {
|
|
||||||
path[last] = i - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
if d.path.Equal(path) {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
_, err := d.Token()
|
|
||||||
if err == io.EOF {
|
|
||||||
return false, nil
|
|
||||||
} else if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is
|
|
||||||
// equivalent to encoding/json.Decode().
|
|
||||||
func (d *Decoder) Decode(v interface{}) error {
|
|
||||||
switch d.context {
|
|
||||||
case objValue:
|
|
||||||
d.context = objKey
|
|
||||||
break
|
|
||||||
case arrValue:
|
|
||||||
d.path.incTop()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
return d.Decoder.Decode(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the
|
|
||||||
// position of the most-recently parsed token.
|
|
||||||
func (d *Decoder) Path() JsonPath {
|
|
||||||
p := make(JsonPath, len(d.path))
|
|
||||||
copy(p, d.path)
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes
|
|
||||||
// between strings that are keys and and strings that are values. String tokens that are object keys are returned as a
|
|
||||||
// KeyString rather than as a native string.
|
|
||||||
func (d *Decoder) Token() (json.Token, error) {
|
|
||||||
t, err := d.Decoder.Token()
|
|
||||||
if err != nil {
|
|
||||||
return t, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if t == nil {
|
|
||||||
switch d.context {
|
|
||||||
case objValue:
|
|
||||||
d.context = objKey
|
|
||||||
break
|
|
||||||
case arrValue:
|
|
||||||
d.path.incTop()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
return t, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch t := t.(type) {
|
|
||||||
case json.Delim:
|
|
||||||
switch t {
|
|
||||||
case json.Delim('{'):
|
|
||||||
if d.context == arrValue {
|
|
||||||
d.path.incTop()
|
|
||||||
}
|
|
||||||
d.path.push("")
|
|
||||||
d.context = objKey
|
|
||||||
break
|
|
||||||
case json.Delim('}'):
|
|
||||||
d.path.pop()
|
|
||||||
d.context = d.path.inferContext()
|
|
||||||
break
|
|
||||||
case json.Delim('['):
|
|
||||||
if d.context == arrValue {
|
|
||||||
d.path.incTop()
|
|
||||||
}
|
|
||||||
d.path.push(-1)
|
|
||||||
d.context = arrValue
|
|
||||||
break
|
|
||||||
case json.Delim(']'):
|
|
||||||
d.path.pop()
|
|
||||||
d.context = d.path.inferContext()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
case float64, json.Number, bool:
|
|
||||||
switch d.context {
|
|
||||||
case objValue:
|
|
||||||
d.context = objKey
|
|
||||||
break
|
|
||||||
case arrValue:
|
|
||||||
d.path.incTop()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
break
|
|
||||||
case string:
|
|
||||||
switch d.context {
|
|
||||||
case objKey:
|
|
||||||
d.path.nameTop(t)
|
|
||||||
d.context = objValue
|
|
||||||
return KeyString(t), err
|
|
||||||
case objValue:
|
|
||||||
d.context = objKey
|
|
||||||
case arrValue:
|
|
||||||
d.path.incTop()
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
return t, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array)
|
|
||||||
// invoking each matching PathAction along the way.
|
|
||||||
//
|
|
||||||
// Scan returns true if there are more contiguous values to scan (for example in an array).
|
|
||||||
func (d *Decoder) Scan(ext *PathActions) (bool, error) {
|
|
||||||
|
|
||||||
rootPath := d.Path()
|
|
||||||
|
|
||||||
// If this is an array path, increment the root path in our local copy.
|
|
||||||
if rootPath.inferContext() == arrValue {
|
|
||||||
rootPath.incTop()
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
// advance the token position
|
|
||||||
_, err := d.Token()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
match:
|
|
||||||
var relPath JsonPath
|
|
||||||
|
|
||||||
// capture the new JSON path
|
|
||||||
path := d.Path()
|
|
||||||
|
|
||||||
if len(path) > len(rootPath) {
|
|
||||||
// capture the path relative to where the scan started
|
|
||||||
relPath = path[len(rootPath):]
|
|
||||||
} else {
|
|
||||||
// if the path is not longer than the root, then we are done with this scan
|
|
||||||
// return boolean flag indicating if there are more items to scan at the same level
|
|
||||||
return d.Decoder.More(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// match the relative path against the path actions
|
|
||||||
if node := ext.node.match(relPath); node != nil {
|
|
||||||
if node.action != nil {
|
|
||||||
// we have a match so execute the action
|
|
||||||
err = node.action(d)
|
|
||||||
if err != nil {
|
|
||||||
return d.Decoder.More(), err
|
|
||||||
}
|
|
||||||
// The action may have advanced the decoder. If we are in an array, advancing it further would
|
|
||||||
// skip tokens. So, if we are scanning an array, jump to the top without advancing the token.
|
|
||||||
if d.path.inferContext() == arrValue && d.Decoder.More() {
|
|
||||||
goto match
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
67
vendor/github.com/exponent-io/jsonpath/path.go
generated
vendored
67
vendor/github.com/exponent-io/jsonpath/path.go
generated
vendored
@@ -1,67 +0,0 @@
|
|||||||
// Extends the Go runtime's json.Decoder enabling navigation of a stream of json tokens.
|
|
||||||
package jsonpath
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
type jsonContext int
|
|
||||||
|
|
||||||
const (
|
|
||||||
none jsonContext = iota
|
|
||||||
objKey
|
|
||||||
objValue
|
|
||||||
arrValue
|
|
||||||
)
|
|
||||||
|
|
||||||
// AnyIndex can be used in a pattern to match any array index.
|
|
||||||
const AnyIndex = -2
|
|
||||||
|
|
||||||
// JsonPath is a slice of strings and/or integers. Each string specifies an JSON object key, and
|
|
||||||
// each integer specifies an index into a JSON array.
|
|
||||||
type JsonPath []interface{}
|
|
||||||
|
|
||||||
func (p *JsonPath) push(n interface{}) { *p = append(*p, n) }
|
|
||||||
func (p *JsonPath) pop() { *p = (*p)[:len(*p)-1] }
|
|
||||||
|
|
||||||
// increment the index at the top of the stack (must be an array index)
|
|
||||||
func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 }
|
|
||||||
|
|
||||||
// name the key at the top of the stack (must be an object key)
|
|
||||||
func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n }
|
|
||||||
|
|
||||||
// infer the context from the item at the top of the stack
|
|
||||||
func (p *JsonPath) inferContext() jsonContext {
|
|
||||||
if len(*p) == 0 {
|
|
||||||
return none
|
|
||||||
}
|
|
||||||
t := (*p)[len(*p)-1]
|
|
||||||
switch t.(type) {
|
|
||||||
case string:
|
|
||||||
return objKey
|
|
||||||
case int:
|
|
||||||
return arrValue
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("Invalid stack type %T", t))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Equal tests for equality between two JsonPath types.
|
|
||||||
func (p *JsonPath) Equal(o JsonPath) bool {
|
|
||||||
if len(*p) != len(o) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for i, v := range *p {
|
|
||||||
if v != o[i] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *JsonPath) HasPrefix(o JsonPath) bool {
|
|
||||||
for i, v := range o {
|
|
||||||
if v != (*p)[i] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
61
vendor/github.com/exponent-io/jsonpath/pathaction.go
generated
vendored
61
vendor/github.com/exponent-io/jsonpath/pathaction.go
generated
vendored
@@ -1,61 +0,0 @@
|
|||||||
package jsonpath
|
|
||||||
|
|
||||||
// pathNode is used to construct a trie of paths to be matched
|
|
||||||
type pathNode struct {
|
|
||||||
matchOn interface{} // string, or integer
|
|
||||||
childNodes []pathNode
|
|
||||||
action DecodeAction
|
|
||||||
}
|
|
||||||
|
|
||||||
// match climbs the trie to find a node that matches the given JSON path.
|
|
||||||
func (n *pathNode) match(path JsonPath) *pathNode {
|
|
||||||
var node *pathNode = n
|
|
||||||
for _, ps := range path {
|
|
||||||
found := false
|
|
||||||
for i, n := range node.childNodes {
|
|
||||||
if n.matchOn == ps {
|
|
||||||
node = &node.childNodes[i]
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
} else if _, ok := ps.(int); ok && n.matchOn == AnyIndex {
|
|
||||||
node = &node.childNodes[i]
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
|
|
||||||
// PathActions represents a collection of DecodeAction functions that should be called at certain path positions
|
|
||||||
// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams.
|
|
||||||
type PathActions struct {
|
|
||||||
node pathNode
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail.
|
|
||||||
type DecodeAction func(d *Decoder) error
|
|
||||||
|
|
||||||
// Add specifies an action to call on the Decoder when the specified path is encountered.
|
|
||||||
func (je *PathActions) Add(action DecodeAction, path ...interface{}) {
|
|
||||||
|
|
||||||
var node *pathNode = &je.node
|
|
||||||
for _, ps := range path {
|
|
||||||
found := false
|
|
||||||
for i, n := range node.childNodes {
|
|
||||||
if n.matchOn == ps {
|
|
||||||
node = &node.childNodes[i]
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
node.childNodes = append(node.childNodes, pathNode{matchOn: ps})
|
|
||||||
node = &node.childNodes[len(node.childNodes)-1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
node.action = action
|
|
||||||
}
|
|
||||||
294
vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go
generated
vendored
Normal file
294
vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go
generated
vendored
Normal file
@@ -0,0 +1,294 @@
|
|||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/format"
|
||||||
|
"html/template"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/globalsign/mgo/internal/json"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.SetFlags(0)
|
||||||
|
log.SetPrefix(name + ": ")
|
||||||
|
|
||||||
|
var g Generator
|
||||||
|
|
||||||
|
fmt.Fprintf(&g, "// Code generated by \"%s.go\"; DO NOT EDIT\n\n", name)
|
||||||
|
|
||||||
|
src := g.generate()
|
||||||
|
|
||||||
|
err := ioutil.WriteFile(fmt.Sprintf("%s.go", strings.TrimSuffix(name, "_generator")), src, 0644)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("writing output: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generator holds the state of the analysis. Primarily used to buffer
|
||||||
|
// the output for format.Source.
|
||||||
|
type Generator struct {
|
||||||
|
bytes.Buffer // Accumulated output.
|
||||||
|
}
|
||||||
|
|
||||||
|
// format returns the gofmt-ed contents of the Generator's buffer.
|
||||||
|
func (g *Generator) format() []byte {
|
||||||
|
src, err := format.Source(g.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
// Should never happen, but can arise when developing this code.
|
||||||
|
// The user can compile the output to see the error.
|
||||||
|
log.Printf("warning: internal error: invalid Go generated: %s", err)
|
||||||
|
log.Printf("warning: compile the package to analyze the error")
|
||||||
|
return g.Bytes()
|
||||||
|
}
|
||||||
|
return src
|
||||||
|
}
|
||||||
|
|
||||||
|
// EVERYTHING ABOVE IS CONSTANT BETWEEN THE GENERATORS
|
||||||
|
|
||||||
|
const name = "bson_corpus_spec_test_generator"
|
||||||
|
|
||||||
|
func (g *Generator) generate() []byte {
|
||||||
|
|
||||||
|
testFiles, err := filepath.Glob("./specdata/specifications/source/bson-corpus/tests/*.json")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error reading bson-corpus files: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tests, err := g.loadTests(testFiles)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error loading tests: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tmpl, err := g.getTemplate()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error loading template: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tmpl.Execute(&g.Buffer, tests)
|
||||||
|
|
||||||
|
return g.format()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) loadTests(filenames []string) ([]*testDef, error) {
|
||||||
|
var tests []*testDef
|
||||||
|
for _, filename := range filenames {
|
||||||
|
test, err := g.loadTest(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
tests = append(tests, test)
|
||||||
|
}
|
||||||
|
|
||||||
|
return tests, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) loadTest(filename string) (*testDef, error) {
|
||||||
|
content, err := ioutil.ReadFile(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var testDef testDef
|
||||||
|
err = json.Unmarshal(content, &testDef)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
names := make(map[string]struct{})
|
||||||
|
|
||||||
|
for i := len(testDef.Valid) - 1; i >= 0; i-- {
|
||||||
|
if testDef.BsonType == "0x05" && testDef.Valid[i].Description == "subtype 0x02" {
|
||||||
|
testDef.Valid = append(testDef.Valid[:i], testDef.Valid[i+1:]...)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
name := cleanupFuncName(testDef.Description + "_" + testDef.Valid[i].Description)
|
||||||
|
nameIdx := name
|
||||||
|
j := 1
|
||||||
|
for {
|
||||||
|
if _, ok := names[nameIdx]; !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
nameIdx = fmt.Sprintf("%s_%d", name, j)
|
||||||
|
}
|
||||||
|
|
||||||
|
names[nameIdx] = struct{}{}
|
||||||
|
|
||||||
|
testDef.Valid[i].TestDef = &testDef
|
||||||
|
testDef.Valid[i].Name = nameIdx
|
||||||
|
testDef.Valid[i].StructTest = testDef.TestKey != "" &&
|
||||||
|
(testDef.BsonType != "0x05" || strings.Contains(testDef.Valid[i].Description, "0x00")) &&
|
||||||
|
!testDef.Deprecated
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := len(testDef.DecodeErrors) - 1; i >= 0; i-- {
|
||||||
|
if strings.Contains(testDef.DecodeErrors[i].Description, "UTF-8") {
|
||||||
|
testDef.DecodeErrors = append(testDef.DecodeErrors[:i], testDef.DecodeErrors[i+1:]...)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
name := cleanupFuncName(testDef.Description + "_" + testDef.DecodeErrors[i].Description)
|
||||||
|
nameIdx := name
|
||||||
|
j := 1
|
||||||
|
for {
|
||||||
|
if _, ok := names[nameIdx]; !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
nameIdx = fmt.Sprintf("%s_%d", name, j)
|
||||||
|
}
|
||||||
|
names[nameIdx] = struct{}{}
|
||||||
|
|
||||||
|
testDef.DecodeErrors[i].Name = nameIdx
|
||||||
|
}
|
||||||
|
|
||||||
|
return &testDef, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Generator) getTemplate() (*template.Template, error) {
|
||||||
|
content := `package bson_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
. "gopkg.in/check.v1"
|
||||||
|
"github.com/globalsign/mgo/bson"
|
||||||
|
)
|
||||||
|
|
||||||
|
func testValid(c *C, in []byte, expected []byte, result interface{}) {
|
||||||
|
err := bson.Unmarshal(in, result)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
|
||||||
|
out, err := bson.Marshal(result)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
|
||||||
|
c.Assert(string(expected), Equals, string(out), Commentf("roundtrip failed for %T, expected '%x' but got '%x'", result, expected, out))
|
||||||
|
}
|
||||||
|
|
||||||
|
func testDecodeSkip(c *C, in []byte) {
|
||||||
|
err := bson.Unmarshal(in, &struct{}{})
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testDecodeError(c *C, in []byte, result interface{}) {
|
||||||
|
err := bson.Unmarshal(in, result)
|
||||||
|
c.Assert(err, Not(IsNil))
|
||||||
|
}
|
||||||
|
|
||||||
|
{{range .}}
|
||||||
|
{{range .Valid}}
|
||||||
|
func (s *S) Test{{.Name}}(c *C) {
|
||||||
|
b, err := hex.DecodeString("{{.Bson}}")
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
|
||||||
|
{{if .CanonicalBson}}
|
||||||
|
cb, err := hex.DecodeString("{{.CanonicalBson}}")
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
{{else}}
|
||||||
|
cb := b
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
var resultD bson.D
|
||||||
|
testValid(c, b, cb, &resultD)
|
||||||
|
{{if .StructTest}}var resultS struct {
|
||||||
|
Element {{.TestDef.GoType}} ` + "`bson:\"{{.TestDef.TestKey}}\"`" + `
|
||||||
|
}
|
||||||
|
testValid(c, b, cb, &resultS){{end}}
|
||||||
|
|
||||||
|
testDecodeSkip(c, b)
|
||||||
|
}
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
{{range .DecodeErrors}}
|
||||||
|
func (s *S) Test{{.Name}}(c *C) {
|
||||||
|
b, err := hex.DecodeString("{{.Bson}}")
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
|
||||||
|
var resultD bson.D
|
||||||
|
testDecodeError(c, b, &resultD)
|
||||||
|
}
|
||||||
|
{{end}}
|
||||||
|
{{end}}
|
||||||
|
`
|
||||||
|
tmpl, err := template.New("").Parse(content)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return tmpl, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanupFuncName(name string) string {
|
||||||
|
return strings.Map(func(r rune) rune {
|
||||||
|
if (r >= 48 && r <= 57) || (r >= 65 && r <= 90) || (r >= 97 && r <= 122) {
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
return '_'
|
||||||
|
}, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
type testDef struct {
|
||||||
|
Description string `json:"description"`
|
||||||
|
BsonType string `json:"bson_type"`
|
||||||
|
TestKey string `json:"test_key"`
|
||||||
|
Valid []*valid `json:"valid"`
|
||||||
|
DecodeErrors []*decodeError `json:"decodeErrors"`
|
||||||
|
Deprecated bool `json:"deprecated"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *testDef) GoType() string {
|
||||||
|
switch t.BsonType {
|
||||||
|
case "0x01":
|
||||||
|
return "float64"
|
||||||
|
case "0x02":
|
||||||
|
return "string"
|
||||||
|
case "0x03":
|
||||||
|
return "bson.D"
|
||||||
|
case "0x04":
|
||||||
|
return "[]interface{}"
|
||||||
|
case "0x05":
|
||||||
|
return "[]byte"
|
||||||
|
case "0x07":
|
||||||
|
return "bson.ObjectId"
|
||||||
|
case "0x08":
|
||||||
|
return "bool"
|
||||||
|
case "0x09":
|
||||||
|
return "time.Time"
|
||||||
|
case "0x0E":
|
||||||
|
return "string"
|
||||||
|
case "0x10":
|
||||||
|
return "int32"
|
||||||
|
case "0x12":
|
||||||
|
return "int64"
|
||||||
|
case "0x13":
|
||||||
|
return "bson.Decimal"
|
||||||
|
default:
|
||||||
|
return "interface{}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type valid struct {
|
||||||
|
Description string `json:"description"`
|
||||||
|
Bson string `json:"bson"`
|
||||||
|
CanonicalBson string `json:"canonical_bson"`
|
||||||
|
|
||||||
|
Name string
|
||||||
|
StructTest bool
|
||||||
|
TestDef *testDef
|
||||||
|
}
|
||||||
|
|
||||||
|
type decodeError struct {
|
||||||
|
Description string `json:"description"`
|
||||||
|
Bson string `json:"bson"`
|
||||||
|
|
||||||
|
Name string
|
||||||
|
}
|
||||||
12
vendor/github.com/golang/mock/AUTHORS
generated
vendored
Normal file
12
vendor/github.com/golang/mock/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
# This is the official list of GoMock authors for copyright purposes.
|
||||||
|
# This file is distinct from the CONTRIBUTORS files.
|
||||||
|
# See the latter for an explanation.
|
||||||
|
|
||||||
|
# Names should be added to this file as
|
||||||
|
# Name or Organization <email address>
|
||||||
|
# The email address is not required for organizations.
|
||||||
|
|
||||||
|
# Please keep the list sorted.
|
||||||
|
|
||||||
|
Alex Reece <awreece@gmail.com>
|
||||||
|
Google Inc.
|
||||||
37
vendor/github.com/golang/mock/CONTRIBUTORS
generated
vendored
Normal file
37
vendor/github.com/golang/mock/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
# This is the official list of people who can contribute (and typically
|
||||||
|
# have contributed) code to the gomock repository.
|
||||||
|
# The AUTHORS file lists the copyright holders; this file
|
||||||
|
# lists people. For example, Google employees are listed here
|
||||||
|
# but not in AUTHORS, because Google holds the copyright.
|
||||||
|
#
|
||||||
|
# The submission process automatically checks to make sure
|
||||||
|
# that people submitting code are listed in this file (by email address).
|
||||||
|
#
|
||||||
|
# Names should be added to this file only after verifying that
|
||||||
|
# the individual or the individual's organization has agreed to
|
||||||
|
# the appropriate Contributor License Agreement, found here:
|
||||||
|
#
|
||||||
|
# http://code.google.com/legal/individual-cla-v1.0.html
|
||||||
|
# http://code.google.com/legal/corporate-cla-v1.0.html
|
||||||
|
#
|
||||||
|
# The agreement for individuals can be filled out on the web.
|
||||||
|
#
|
||||||
|
# When adding J Random Contributor's name to this file,
|
||||||
|
# either J's name or J's organization's name should be
|
||||||
|
# added to the AUTHORS file, depending on whether the
|
||||||
|
# individual or corporate CLA was used.
|
||||||
|
|
||||||
|
# Names should be added to this file like so:
|
||||||
|
# Name <email address>
|
||||||
|
#
|
||||||
|
# An entry with two email addresses specifies that the
|
||||||
|
# first address should be used in the submit logs and
|
||||||
|
# that the second address should be recognized as the
|
||||||
|
# same person when interacting with Rietveld.
|
||||||
|
|
||||||
|
# Please keep the list sorted.
|
||||||
|
|
||||||
|
Aaron Jacobs <jacobsa@google.com> <aaronjjacobs@gmail.com>
|
||||||
|
Alex Reece <awreece@gmail.com>
|
||||||
|
David Symonds <dsymonds@golang.org>
|
||||||
|
Ryan Barrett <ryanb@google.com>
|
||||||
0
vendor/github.com/google/btree/LICENSE → vendor/github.com/golang/mock/LICENSE
generated
vendored
0
vendor/github.com/google/btree/LICENSE → vendor/github.com/golang/mock/LICENSE
generated
vendored
420
vendor/github.com/golang/mock/gomock/call.go
generated
vendored
Normal file
420
vendor/github.com/golang/mock/gomock/call.go
generated
vendored
Normal file
@@ -0,0 +1,420 @@
|
|||||||
|
// Copyright 2010 Google Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package gomock
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Call represents an expected call to a mock.
|
||||||
|
type Call struct {
|
||||||
|
t TestHelper // for triggering test failures on invalid call setup
|
||||||
|
|
||||||
|
receiver interface{} // the receiver of the method call
|
||||||
|
method string // the name of the method
|
||||||
|
methodType reflect.Type // the type of the method
|
||||||
|
args []Matcher // the args
|
||||||
|
origin string // file and line number of call setup
|
||||||
|
|
||||||
|
preReqs []*Call // prerequisite calls
|
||||||
|
|
||||||
|
// Expectations
|
||||||
|
minCalls, maxCalls int
|
||||||
|
|
||||||
|
numCalls int // actual number made
|
||||||
|
|
||||||
|
// actions are called when this Call is called. Each action gets the args and
|
||||||
|
// can set the return values by returning a non-nil slice. Actions run in the
|
||||||
|
// order they are created.
|
||||||
|
actions []func([]interface{}) []interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newCall creates a *Call. It requires the method type in order to support
|
||||||
|
// unexported methods.
|
||||||
|
func newCall(t TestHelper, receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
// TODO: check arity, types.
|
||||||
|
margs := make([]Matcher, len(args))
|
||||||
|
for i, arg := range args {
|
||||||
|
if m, ok := arg.(Matcher); ok {
|
||||||
|
margs[i] = m
|
||||||
|
} else if arg == nil {
|
||||||
|
// Handle nil specially so that passing a nil interface value
|
||||||
|
// will match the typed nils of concrete args.
|
||||||
|
margs[i] = Nil()
|
||||||
|
} else {
|
||||||
|
margs[i] = Eq(arg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
origin := callerInfo(3)
|
||||||
|
actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} {
|
||||||
|
// Synthesize the zero value for each of the return args' types.
|
||||||
|
rets := make([]interface{}, methodType.NumOut())
|
||||||
|
for i := 0; i < methodType.NumOut(); i++ {
|
||||||
|
rets[i] = reflect.Zero(methodType.Out(i)).Interface()
|
||||||
|
}
|
||||||
|
return rets
|
||||||
|
}}
|
||||||
|
return &Call{t: t, receiver: receiver, method: method, methodType: methodType,
|
||||||
|
args: margs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnyTimes allows the expectation to be called 0 or more times
|
||||||
|
func (c *Call) AnyTimes() *Call {
|
||||||
|
c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called, MinTimes also
|
||||||
|
// sets the maximum number of calls to infinity.
|
||||||
|
func (c *Call) MinTimes(n int) *Call {
|
||||||
|
c.minCalls = n
|
||||||
|
if c.maxCalls == 1 {
|
||||||
|
c.maxCalls = 1e8
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called, MaxTimes also
|
||||||
|
// sets the minimum number of calls to 0.
|
||||||
|
func (c *Call) MaxTimes(n int) *Call {
|
||||||
|
c.maxCalls = n
|
||||||
|
if c.minCalls == 1 {
|
||||||
|
c.minCalls = 0
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoAndReturn declares the action to run when the call is matched.
|
||||||
|
// The return values from this function are returned by the mocked function.
|
||||||
|
// It takes an interface{} argument to support n-arity functions.
|
||||||
|
func (c *Call) DoAndReturn(f interface{}) *Call {
|
||||||
|
// TODO: Check arity and types here, rather than dying badly elsewhere.
|
||||||
|
v := reflect.ValueOf(f)
|
||||||
|
|
||||||
|
c.addAction(func(args []interface{}) []interface{} {
|
||||||
|
vargs := make([]reflect.Value, len(args))
|
||||||
|
ft := v.Type()
|
||||||
|
for i := 0; i < len(args); i++ {
|
||||||
|
if args[i] != nil {
|
||||||
|
vargs[i] = reflect.ValueOf(args[i])
|
||||||
|
} else {
|
||||||
|
// Use the zero value for the arg.
|
||||||
|
vargs[i] = reflect.Zero(ft.In(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
vrets := v.Call(vargs)
|
||||||
|
rets := make([]interface{}, len(vrets))
|
||||||
|
for i, ret := range vrets {
|
||||||
|
rets[i] = ret.Interface()
|
||||||
|
}
|
||||||
|
return rets
|
||||||
|
})
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do declares the action to run when the call is matched. The function's
|
||||||
|
// return values are ignored to retain backward compatibility. To use the
|
||||||
|
// return values call DoAndReturn.
|
||||||
|
// It takes an interface{} argument to support n-arity functions.
|
||||||
|
func (c *Call) Do(f interface{}) *Call {
|
||||||
|
// TODO: Check arity and types here, rather than dying badly elsewhere.
|
||||||
|
v := reflect.ValueOf(f)
|
||||||
|
|
||||||
|
c.addAction(func(args []interface{}) []interface{} {
|
||||||
|
vargs := make([]reflect.Value, len(args))
|
||||||
|
ft := v.Type()
|
||||||
|
for i := 0; i < len(args); i++ {
|
||||||
|
if args[i] != nil {
|
||||||
|
vargs[i] = reflect.ValueOf(args[i])
|
||||||
|
} else {
|
||||||
|
// Use the zero value for the arg.
|
||||||
|
vargs[i] = reflect.Zero(ft.In(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v.Call(vargs)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return declares the values to be returned by the mocked function call.
|
||||||
|
func (c *Call) Return(rets ...interface{}) *Call {
|
||||||
|
c.t.Helper()
|
||||||
|
|
||||||
|
mt := c.methodType
|
||||||
|
if len(rets) != mt.NumOut() {
|
||||||
|
c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d [%s]",
|
||||||
|
c.receiver, c.method, len(rets), mt.NumOut(), c.origin)
|
||||||
|
}
|
||||||
|
for i, ret := range rets {
|
||||||
|
if got, want := reflect.TypeOf(ret), mt.Out(i); got == want {
|
||||||
|
// Identical types; nothing to do.
|
||||||
|
} else if got == nil {
|
||||||
|
// Nil needs special handling.
|
||||||
|
switch want.Kind() {
|
||||||
|
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||||
|
// ok
|
||||||
|
default:
|
||||||
|
c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable [%s]",
|
||||||
|
i, c.receiver, c.method, want, c.origin)
|
||||||
|
}
|
||||||
|
} else if got.AssignableTo(want) {
|
||||||
|
// Assignable type relation. Make the assignment now so that the generated code
|
||||||
|
// can return the values with a type assertion.
|
||||||
|
v := reflect.New(want).Elem()
|
||||||
|
v.Set(reflect.ValueOf(ret))
|
||||||
|
rets[i] = v.Interface()
|
||||||
|
} else {
|
||||||
|
c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v [%s]",
|
||||||
|
i, c.receiver, c.method, got, want, c.origin)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.addAction(func([]interface{}) []interface{} {
|
||||||
|
return rets
|
||||||
|
})
|
||||||
|
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Times declares the exact number of times a function call is expected to be executed.
|
||||||
|
func (c *Call) Times(n int) *Call {
|
||||||
|
c.minCalls, c.maxCalls = n, n
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetArg declares an action that will set the nth argument's value,
|
||||||
|
// indirected through a pointer. Or, in the case of a slice, SetArg
|
||||||
|
// will copy value's elements into the nth argument.
|
||||||
|
func (c *Call) SetArg(n int, value interface{}) *Call {
|
||||||
|
c.t.Helper()
|
||||||
|
|
||||||
|
mt := c.methodType
|
||||||
|
// TODO: This will break on variadic methods.
|
||||||
|
// We will need to check those at invocation time.
|
||||||
|
if n < 0 || n >= mt.NumIn() {
|
||||||
|
c.t.Fatalf("SetArg(%d, ...) called for a method with %d args [%s]",
|
||||||
|
n, mt.NumIn(), c.origin)
|
||||||
|
}
|
||||||
|
// Permit setting argument through an interface.
|
||||||
|
// In the interface case, we don't (nay, can't) check the type here.
|
||||||
|
at := mt.In(n)
|
||||||
|
switch at.Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
dt := at.Elem()
|
||||||
|
if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) {
|
||||||
|
c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v [%s]",
|
||||||
|
n, vt, dt, c.origin)
|
||||||
|
}
|
||||||
|
case reflect.Interface:
|
||||||
|
// nothing to do
|
||||||
|
case reflect.Slice:
|
||||||
|
// nothing to do
|
||||||
|
default:
|
||||||
|
c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice type %v [%s]",
|
||||||
|
n, at, c.origin)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.addAction(func(args []interface{}) []interface{} {
|
||||||
|
v := reflect.ValueOf(value)
|
||||||
|
switch reflect.TypeOf(args[n]).Kind() {
|
||||||
|
case reflect.Slice:
|
||||||
|
setSlice(args[n], v)
|
||||||
|
default:
|
||||||
|
reflect.ValueOf(args[n]).Elem().Set(v)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// isPreReq returns true if other is a direct or indirect prerequisite to c.
|
||||||
|
func (c *Call) isPreReq(other *Call) bool {
|
||||||
|
for _, preReq := range c.preReqs {
|
||||||
|
if other == preReq || preReq.isPreReq(other) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// After declares that the call may only match after preReq has been exhausted.
|
||||||
|
func (c *Call) After(preReq *Call) *Call {
|
||||||
|
c.t.Helper()
|
||||||
|
|
||||||
|
if c == preReq {
|
||||||
|
c.t.Fatalf("A call isn't allowed to be its own prerequisite")
|
||||||
|
}
|
||||||
|
if preReq.isPreReq(c) {
|
||||||
|
c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.preReqs = append(c.preReqs, preReq)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if the minimum number of calls have been made.
|
||||||
|
func (c *Call) satisfied() bool {
|
||||||
|
return c.numCalls >= c.minCalls
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true iff the maximum number of calls have been made.
|
||||||
|
func (c *Call) exhausted() bool {
|
||||||
|
return c.numCalls >= c.maxCalls
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Call) String() string {
|
||||||
|
args := make([]string, len(c.args))
|
||||||
|
for i, arg := range c.args {
|
||||||
|
args[i] = arg.String()
|
||||||
|
}
|
||||||
|
arguments := strings.Join(args, ", ")
|
||||||
|
return fmt.Sprintf("%T.%v(%s) %s", c.receiver, c.method, arguments, c.origin)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests if the given call matches the expected call.
|
||||||
|
// If yes, returns nil. If no, returns error with message explaining why it does not match.
|
||||||
|
func (c *Call) matches(args []interface{}) error {
|
||||||
|
if !c.methodType.IsVariadic() {
|
||||||
|
if len(args) != len(c.args) {
|
||||||
|
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: %d",
|
||||||
|
c.origin, len(args), len(c.args))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, m := range c.args {
|
||||||
|
if !m.Matches(args[i]) {
|
||||||
|
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
|
||||||
|
c.origin, strconv.Itoa(i), args[i], m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if len(c.args) < c.methodType.NumIn()-1 {
|
||||||
|
return fmt.Errorf("Expected call at %s has the wrong number of matchers. Got: %d, want: %d",
|
||||||
|
c.origin, len(c.args), c.methodType.NumIn()-1)
|
||||||
|
}
|
||||||
|
if len(c.args) != c.methodType.NumIn() && len(args) != len(c.args) {
|
||||||
|
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: %d",
|
||||||
|
c.origin, len(args), len(c.args))
|
||||||
|
}
|
||||||
|
if len(args) < len(c.args)-1 {
|
||||||
|
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: greater than or equal to %d",
|
||||||
|
c.origin, len(args), len(c.args)-1)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, m := range c.args {
|
||||||
|
if i < c.methodType.NumIn()-1 {
|
||||||
|
// Non-variadic args
|
||||||
|
if !m.Matches(args[i]) {
|
||||||
|
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
|
||||||
|
c.origin, strconv.Itoa(i), args[i], m)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// The last arg has a possibility of a variadic argument, so let it branch
|
||||||
|
|
||||||
|
// sample: Foo(a int, b int, c ...int)
|
||||||
|
if i < len(c.args) && i < len(args) {
|
||||||
|
if m.Matches(args[i]) {
|
||||||
|
// Got Foo(a, b, c) want Foo(matcherA, matcherB, gomock.Any())
|
||||||
|
// Got Foo(a, b, c) want Foo(matcherA, matcherB, someSliceMatcher)
|
||||||
|
// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC)
|
||||||
|
// Got Foo(a, b) want Foo(matcherA, matcherB)
|
||||||
|
// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The number of actual args don't match the number of matchers,
|
||||||
|
// or the last matcher is a slice and the last arg is not.
|
||||||
|
// If this function still matches it is because the last matcher
|
||||||
|
// matches all the remaining arguments or the lack of any.
|
||||||
|
// Convert the remaining arguments, if any, into a slice of the
|
||||||
|
// expected type.
|
||||||
|
vargsType := c.methodType.In(c.methodType.NumIn() - 1)
|
||||||
|
vargs := reflect.MakeSlice(vargsType, 0, len(args)-i)
|
||||||
|
for _, arg := range args[i:] {
|
||||||
|
vargs = reflect.Append(vargs, reflect.ValueOf(arg))
|
||||||
|
}
|
||||||
|
if m.Matches(vargs.Interface()) {
|
||||||
|
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any())
|
||||||
|
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher)
|
||||||
|
// Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any())
|
||||||
|
// Got Foo(a, b) want Foo(matcherA, matcherB, someEmptySliceMatcher)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Wrong number of matchers or not match. Fail.
|
||||||
|
// Got Foo(a, b) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||||
|
// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||||
|
// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE)
|
||||||
|
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||||
|
// Got Foo(a, b, c) want Foo(matcherA, matcherB)
|
||||||
|
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
|
||||||
|
c.origin, strconv.Itoa(i), args[i:], c.args[i])
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that all prerequisite calls have been satisfied.
|
||||||
|
for _, preReqCall := range c.preReqs {
|
||||||
|
if !preReqCall.satisfied() {
|
||||||
|
return fmt.Errorf("Expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v",
|
||||||
|
c.origin, preReqCall, c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that the call is not exhausted.
|
||||||
|
if c.exhausted() {
|
||||||
|
return fmt.Errorf("Expected call at %s has already been called the max number of times.", c.origin)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// dropPrereqs tells the expected Call to not re-check prerequisite calls any
|
||||||
|
// longer, and to return its current set.
|
||||||
|
func (c *Call) dropPrereqs() (preReqs []*Call) {
|
||||||
|
preReqs = c.preReqs
|
||||||
|
c.preReqs = nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Call) call(args []interface{}) []func([]interface{}) []interface{} {
|
||||||
|
c.numCalls++
|
||||||
|
return c.actions
|
||||||
|
}
|
||||||
|
|
||||||
|
// InOrder declares that the given calls should occur in order.
|
||||||
|
func InOrder(calls ...*Call) {
|
||||||
|
for i := 1; i < len(calls); i++ {
|
||||||
|
calls[i].After(calls[i-1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setSlice(arg interface{}, v reflect.Value) {
|
||||||
|
va := reflect.ValueOf(arg)
|
||||||
|
for i := 0; i < v.Len(); i++ {
|
||||||
|
va.Index(i).Set(v.Index(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Call) addAction(action func([]interface{}) []interface{}) {
|
||||||
|
c.actions = append(c.actions, action)
|
||||||
|
}
|
||||||
108
vendor/github.com/golang/mock/gomock/callset.go
generated
vendored
Normal file
108
vendor/github.com/golang/mock/gomock/callset.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
// Copyright 2011 Google Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package gomock
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// callSet represents a set of expected calls, indexed by receiver and method
|
||||||
|
// name.
|
||||||
|
type callSet struct {
|
||||||
|
// Calls that are still expected.
|
||||||
|
expected map[callSetKey][]*Call
|
||||||
|
// Calls that have been exhausted.
|
||||||
|
exhausted map[callSetKey][]*Call
|
||||||
|
}
|
||||||
|
|
||||||
|
// callSetKey is the key in the maps in callSet
|
||||||
|
type callSetKey struct {
|
||||||
|
receiver interface{}
|
||||||
|
fname string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCallSet() *callSet {
|
||||||
|
return &callSet{make(map[callSetKey][]*Call), make(map[callSetKey][]*Call)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds a new expected call.
|
||||||
|
func (cs callSet) Add(call *Call) {
|
||||||
|
key := callSetKey{call.receiver, call.method}
|
||||||
|
m := cs.expected
|
||||||
|
if call.exhausted() {
|
||||||
|
m = cs.exhausted
|
||||||
|
}
|
||||||
|
m[key] = append(m[key], call)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes an expected call.
|
||||||
|
func (cs callSet) Remove(call *Call) {
|
||||||
|
key := callSetKey{call.receiver, call.method}
|
||||||
|
calls := cs.expected[key]
|
||||||
|
for i, c := range calls {
|
||||||
|
if c == call {
|
||||||
|
// maintain order for remaining calls
|
||||||
|
cs.expected[key] = append(calls[:i], calls[i+1:]...)
|
||||||
|
cs.exhausted[key] = append(cs.exhausted[key], call)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindMatch searches for a matching call. Returns error with explanation message if no call matched.
|
||||||
|
func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) (*Call, error) {
|
||||||
|
key := callSetKey{receiver, method}
|
||||||
|
|
||||||
|
// Search through the expected calls.
|
||||||
|
expected := cs.expected[key]
|
||||||
|
var callsErrors bytes.Buffer
|
||||||
|
for _, call := range expected {
|
||||||
|
err := call.matches(args)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(&callsErrors, "\n%v", err)
|
||||||
|
} else {
|
||||||
|
return call, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we haven't found a match then search through the exhausted calls so we
|
||||||
|
// get useful error messages.
|
||||||
|
exhausted := cs.exhausted[key]
|
||||||
|
for _, call := range exhausted {
|
||||||
|
if err := call.matches(args); err != nil {
|
||||||
|
fmt.Fprintf(&callsErrors, "\n%v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(expected)+len(exhausted) == 0 {
|
||||||
|
fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf(callsErrors.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Failures returns the calls that are not satisfied.
|
||||||
|
func (cs callSet) Failures() []*Call {
|
||||||
|
failures := make([]*Call, 0, len(cs.expected))
|
||||||
|
for _, calls := range cs.expected {
|
||||||
|
for _, call := range calls {
|
||||||
|
if !call.satisfied() {
|
||||||
|
failures = append(failures, call)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return failures
|
||||||
|
}
|
||||||
235
vendor/github.com/golang/mock/gomock/controller.go
generated
vendored
Normal file
235
vendor/github.com/golang/mock/gomock/controller.go
generated
vendored
Normal file
@@ -0,0 +1,235 @@
|
|||||||
|
// Copyright 2010 Google Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// GoMock - a mock framework for Go.
|
||||||
|
//
|
||||||
|
// Standard usage:
|
||||||
|
// (1) Define an interface that you wish to mock.
|
||||||
|
// type MyInterface interface {
|
||||||
|
// SomeMethod(x int64, y string)
|
||||||
|
// }
|
||||||
|
// (2) Use mockgen to generate a mock from the interface.
|
||||||
|
// (3) Use the mock in a test:
|
||||||
|
// func TestMyThing(t *testing.T) {
|
||||||
|
// mockCtrl := gomock.NewController(t)
|
||||||
|
// defer mockCtrl.Finish()
|
||||||
|
//
|
||||||
|
// mockObj := something.NewMockMyInterface(mockCtrl)
|
||||||
|
// mockObj.EXPECT().SomeMethod(4, "blah")
|
||||||
|
// // pass mockObj to a real object and play with it.
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// By default, expected calls are not enforced to run in any particular order.
|
||||||
|
// Call order dependency can be enforced by use of InOrder and/or Call.After.
|
||||||
|
// Call.After can create more varied call order dependencies, but InOrder is
|
||||||
|
// often more convenient.
|
||||||
|
//
|
||||||
|
// The following examples create equivalent call order dependencies.
|
||||||
|
//
|
||||||
|
// Example of using Call.After to chain expected call order:
|
||||||
|
//
|
||||||
|
// firstCall := mockObj.EXPECT().SomeMethod(1, "first")
|
||||||
|
// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall)
|
||||||
|
// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall)
|
||||||
|
//
|
||||||
|
// Example of using InOrder to declare expected call order:
|
||||||
|
//
|
||||||
|
// gomock.InOrder(
|
||||||
|
// mockObj.EXPECT().SomeMethod(1, "first"),
|
||||||
|
// mockObj.EXPECT().SomeMethod(2, "second"),
|
||||||
|
// mockObj.EXPECT().SomeMethod(3, "third"),
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// TODO:
|
||||||
|
// - Handle different argument/return types (e.g. ..., chan, map, interface).
|
||||||
|
package gomock
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A TestReporter is something that can be used to report test failures.
|
||||||
|
// It is satisfied by the standard library's *testing.T.
|
||||||
|
type TestReporter interface {
|
||||||
|
Errorf(format string, args ...interface{})
|
||||||
|
Fatalf(format string, args ...interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHelper is a TestReporter that has the Helper method. It is satisfied
|
||||||
|
// by the standard library's *testing.T.
|
||||||
|
type TestHelper interface {
|
||||||
|
TestReporter
|
||||||
|
Helper()
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Controller represents the top-level control of a mock ecosystem.
|
||||||
|
// It defines the scope and lifetime of mock objects, as well as their expectations.
|
||||||
|
// It is safe to call Controller's methods from multiple goroutines.
|
||||||
|
type Controller struct {
|
||||||
|
// T should only be called within a generated mock. It is not intended to
|
||||||
|
// be used in user code and may be changed in future versions. T is the
|
||||||
|
// TestReporter passed in when creating the Controller via NewController.
|
||||||
|
// If the TestReporter does not implment a TestHelper it will be wrapped
|
||||||
|
// with a nopTestHelper.
|
||||||
|
T TestHelper
|
||||||
|
mu sync.Mutex
|
||||||
|
expectedCalls *callSet
|
||||||
|
finished bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewController(t TestReporter) *Controller {
|
||||||
|
h, ok := t.(TestHelper)
|
||||||
|
if !ok {
|
||||||
|
h = nopTestHelper{t}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Controller{
|
||||||
|
T: h,
|
||||||
|
expectedCalls: newCallSet(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type cancelReporter struct {
|
||||||
|
TestHelper
|
||||||
|
cancel func()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *cancelReporter) Errorf(format string, args ...interface{}) {
|
||||||
|
r.TestHelper.Errorf(format, args...)
|
||||||
|
}
|
||||||
|
func (r *cancelReporter) Fatalf(format string, args ...interface{}) {
|
||||||
|
defer r.cancel()
|
||||||
|
r.TestHelper.Fatalf(format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithContext returns a new Controller and a Context, which is cancelled on any
|
||||||
|
// fatal failure.
|
||||||
|
func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) {
|
||||||
|
h, ok := t.(TestHelper)
|
||||||
|
if !ok {
|
||||||
|
h = nopTestHelper{t}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
return NewController(&cancelReporter{h, cancel}), ctx
|
||||||
|
}
|
||||||
|
|
||||||
|
type nopTestHelper struct {
|
||||||
|
TestReporter
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h nopTestHelper) Helper() {}
|
||||||
|
|
||||||
|
func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call {
|
||||||
|
ctrl.T.Helper()
|
||||||
|
|
||||||
|
recv := reflect.ValueOf(receiver)
|
||||||
|
for i := 0; i < recv.Type().NumMethod(); i++ {
|
||||||
|
if recv.Type().Method(i).Name == method {
|
||||||
|
return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ctrl.T.Fatalf("gomock: failed finding method %s on %T", method, receiver)
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call {
|
||||||
|
ctrl.T.Helper()
|
||||||
|
|
||||||
|
call := newCall(ctrl.T, receiver, method, methodType, args...)
|
||||||
|
|
||||||
|
ctrl.mu.Lock()
|
||||||
|
defer ctrl.mu.Unlock()
|
||||||
|
ctrl.expectedCalls.Add(call)
|
||||||
|
|
||||||
|
return call
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} {
|
||||||
|
ctrl.T.Helper()
|
||||||
|
|
||||||
|
// Nest this code so we can use defer to make sure the lock is released.
|
||||||
|
actions := func() []func([]interface{}) []interface{} {
|
||||||
|
ctrl.T.Helper()
|
||||||
|
ctrl.mu.Lock()
|
||||||
|
defer ctrl.mu.Unlock()
|
||||||
|
|
||||||
|
expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args)
|
||||||
|
if err != nil {
|
||||||
|
origin := callerInfo(2)
|
||||||
|
ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Two things happen here:
|
||||||
|
// * the matching call no longer needs to check prerequite calls,
|
||||||
|
// * and the prerequite calls are no longer expected, so remove them.
|
||||||
|
preReqCalls := expected.dropPrereqs()
|
||||||
|
for _, preReqCall := range preReqCalls {
|
||||||
|
ctrl.expectedCalls.Remove(preReqCall)
|
||||||
|
}
|
||||||
|
|
||||||
|
actions := expected.call(args)
|
||||||
|
if expected.exhausted() {
|
||||||
|
ctrl.expectedCalls.Remove(expected)
|
||||||
|
}
|
||||||
|
return actions
|
||||||
|
}()
|
||||||
|
|
||||||
|
var rets []interface{}
|
||||||
|
for _, action := range actions {
|
||||||
|
if r := action(args); r != nil {
|
||||||
|
rets = r
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return rets
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctrl *Controller) Finish() {
|
||||||
|
ctrl.T.Helper()
|
||||||
|
|
||||||
|
ctrl.mu.Lock()
|
||||||
|
defer ctrl.mu.Unlock()
|
||||||
|
|
||||||
|
if ctrl.finished {
|
||||||
|
ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.")
|
||||||
|
}
|
||||||
|
ctrl.finished = true
|
||||||
|
|
||||||
|
// If we're currently panicking, probably because this is a deferred call,
|
||||||
|
// pass through the panic.
|
||||||
|
if err := recover(); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that all remaining expected calls are satisfied.
|
||||||
|
failures := ctrl.expectedCalls.Failures()
|
||||||
|
for _, call := range failures {
|
||||||
|
ctrl.T.Errorf("missing call(s) to %v", call)
|
||||||
|
}
|
||||||
|
if len(failures) != 0 {
|
||||||
|
ctrl.T.Fatalf("aborting test due to missing call(s)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func callerInfo(skip int) string {
|
||||||
|
if _, file, line, ok := runtime.Caller(skip + 1); ok {
|
||||||
|
return fmt.Sprintf("%s:%d", file, line)
|
||||||
|
}
|
||||||
|
return "unknown file"
|
||||||
|
}
|
||||||
122
vendor/github.com/golang/mock/gomock/matchers.go
generated
vendored
Normal file
122
vendor/github.com/golang/mock/gomock/matchers.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
// Copyright 2010 Google Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package gomock
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Matcher is a representation of a class of values.
|
||||||
|
// It is used to represent the valid or expected arguments to a mocked method.
|
||||||
|
type Matcher interface {
|
||||||
|
// Matches returns whether x is a match.
|
||||||
|
Matches(x interface{}) bool
|
||||||
|
|
||||||
|
// String describes what the matcher matches.
|
||||||
|
String() string
|
||||||
|
}
|
||||||
|
|
||||||
|
type anyMatcher struct{}
|
||||||
|
|
||||||
|
func (anyMatcher) Matches(x interface{}) bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (anyMatcher) String() string {
|
||||||
|
return "is anything"
|
||||||
|
}
|
||||||
|
|
||||||
|
type eqMatcher struct {
|
||||||
|
x interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e eqMatcher) Matches(x interface{}) bool {
|
||||||
|
return reflect.DeepEqual(e.x, x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e eqMatcher) String() string {
|
||||||
|
return fmt.Sprintf("is equal to %v", e.x)
|
||||||
|
}
|
||||||
|
|
||||||
|
type nilMatcher struct{}
|
||||||
|
|
||||||
|
func (nilMatcher) Matches(x interface{}) bool {
|
||||||
|
if x == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
v := reflect.ValueOf(x)
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
|
||||||
|
reflect.Ptr, reflect.Slice:
|
||||||
|
return v.IsNil()
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (nilMatcher) String() string {
|
||||||
|
return "is nil"
|
||||||
|
}
|
||||||
|
|
||||||
|
type notMatcher struct {
|
||||||
|
m Matcher
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n notMatcher) Matches(x interface{}) bool {
|
||||||
|
return !n.m.Matches(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n notMatcher) String() string {
|
||||||
|
// TODO: Improve this if we add a NotString method to the Matcher interface.
|
||||||
|
return "not(" + n.m.String() + ")"
|
||||||
|
}
|
||||||
|
|
||||||
|
type assignableToTypeOfMatcher struct {
|
||||||
|
targetType reflect.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m assignableToTypeOfMatcher) Matches(x interface{}) bool {
|
||||||
|
return reflect.TypeOf(x).AssignableTo(m.targetType)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m assignableToTypeOfMatcher) String() string {
|
||||||
|
return "is assignable to " + m.targetType.Name()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Constructors
|
||||||
|
func Any() Matcher { return anyMatcher{} }
|
||||||
|
func Eq(x interface{}) Matcher { return eqMatcher{x} }
|
||||||
|
func Nil() Matcher { return nilMatcher{} }
|
||||||
|
func Not(x interface{}) Matcher {
|
||||||
|
if m, ok := x.(Matcher); ok {
|
||||||
|
return notMatcher{m}
|
||||||
|
}
|
||||||
|
return notMatcher{Eq(x)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssignableToTypeOf is a Matcher that matches if the parameter to the mock
|
||||||
|
// function is assignable to the type of the parameter to this function.
|
||||||
|
//
|
||||||
|
// Example usage:
|
||||||
|
//
|
||||||
|
// dbMock.EXPECT().
|
||||||
|
// Insert(gomock.AssignableToTypeOf(&EmployeeRecord{})).
|
||||||
|
// Return(errors.New("DB error"))
|
||||||
|
//
|
||||||
|
func AssignableToTypeOf(x interface{}) Matcher {
|
||||||
|
return assignableToTypeOfMatcher{reflect.TypeOf(x)}
|
||||||
|
}
|
||||||
1
vendor/github.com/google/btree/.travis.yml
generated
vendored
1
vendor/github.com/google/btree/.travis.yml
generated
vendored
@@ -1 +0,0 @@
|
|||||||
language: go
|
|
||||||
12
vendor/github.com/google/btree/README.md
generated
vendored
12
vendor/github.com/google/btree/README.md
generated
vendored
@@ -1,12 +0,0 @@
|
|||||||
# BTree implementation for Go
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
This package provides an in-memory B-Tree implementation for Go, useful as
|
|
||||||
an ordered, mutable data structure.
|
|
||||||
|
|
||||||
The API is based off of the wonderful
|
|
||||||
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
|
|
||||||
act as a drop-in replacement for gollrb trees.
|
|
||||||
|
|
||||||
See http://godoc.org/github.com/google/btree for documentation.
|
|
||||||
890
vendor/github.com/google/btree/btree.go
generated
vendored
890
vendor/github.com/google/btree/btree.go
generated
vendored
@@ -1,890 +0,0 @@
|
|||||||
// Copyright 2014 Google Inc.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package btree implements in-memory B-Trees of arbitrary degree.
|
|
||||||
//
|
|
||||||
// btree implements an in-memory B-Tree for use as an ordered data structure.
|
|
||||||
// It is not meant for persistent storage solutions.
|
|
||||||
//
|
|
||||||
// It has a flatter structure than an equivalent red-black or other binary tree,
|
|
||||||
// which in some cases yields better memory usage and/or performance.
|
|
||||||
// See some discussion on the matter here:
|
|
||||||
// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
|
|
||||||
// Note, though, that this project is in no way related to the C++ B-Tree
|
|
||||||
// implementation written about there.
|
|
||||||
//
|
|
||||||
// Within this tree, each node contains a slice of items and a (possibly nil)
|
|
||||||
// slice of children. For basic numeric values or raw structs, this can cause
|
|
||||||
// efficiency differences when compared to equivalent C++ template code that
|
|
||||||
// stores values in arrays within the node:
|
|
||||||
// * Due to the overhead of storing values as interfaces (each
|
|
||||||
// value needs to be stored as the value itself, then 2 words for the
|
|
||||||
// interface pointing to that value and its type), resulting in higher
|
|
||||||
// memory use.
|
|
||||||
// * Since interfaces can point to values anywhere in memory, values are
|
|
||||||
// most likely not stored in contiguous blocks, resulting in a higher
|
|
||||||
// number of cache misses.
|
|
||||||
// These issues don't tend to matter, though, when working with strings or other
|
|
||||||
// heap-allocated structures, since C++-equivalent structures also must store
|
|
||||||
// pointers and also distribute their values across the heap.
|
|
||||||
//
|
|
||||||
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
|
|
||||||
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
|
|
||||||
// widely used ordered tree implementation in the Go ecosystem currently.
|
|
||||||
// Its functions, therefore, exactly mirror those of
|
|
||||||
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
|
|
||||||
// support storing multiple equivalent values.
|
|
||||||
package btree
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Item represents a single object in the tree.
|
|
||||||
type Item interface {
|
|
||||||
// Less tests whether the current item is less than the given argument.
|
|
||||||
//
|
|
||||||
// This must provide a strict weak ordering.
|
|
||||||
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
|
|
||||||
// hold one of either a or b in the tree).
|
|
||||||
Less(than Item) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
DefaultFreeListSize = 32
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
nilItems = make(items, 16)
|
|
||||||
nilChildren = make(children, 16)
|
|
||||||
)
|
|
||||||
|
|
||||||
// FreeList represents a free list of btree nodes. By default each
|
|
||||||
// BTree has its own FreeList, but multiple BTrees can share the same
|
|
||||||
// FreeList.
|
|
||||||
// Two Btrees using the same freelist are safe for concurrent write access.
|
|
||||||
type FreeList struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
freelist []*node
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFreeList creates a new free list.
|
|
||||||
// size is the maximum size of the returned free list.
|
|
||||||
func NewFreeList(size int) *FreeList {
|
|
||||||
return &FreeList{freelist: make([]*node, 0, size)}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FreeList) newNode() (n *node) {
|
|
||||||
f.mu.Lock()
|
|
||||||
index := len(f.freelist) - 1
|
|
||||||
if index < 0 {
|
|
||||||
f.mu.Unlock()
|
|
||||||
return new(node)
|
|
||||||
}
|
|
||||||
n = f.freelist[index]
|
|
||||||
f.freelist[index] = nil
|
|
||||||
f.freelist = f.freelist[:index]
|
|
||||||
f.mu.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// freeNode adds the given node to the list, returning true if it was added
|
|
||||||
// and false if it was discarded.
|
|
||||||
func (f *FreeList) freeNode(n *node) (out bool) {
|
|
||||||
f.mu.Lock()
|
|
||||||
if len(f.freelist) < cap(f.freelist) {
|
|
||||||
f.freelist = append(f.freelist, n)
|
|
||||||
out = true
|
|
||||||
}
|
|
||||||
f.mu.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ItemIterator allows callers of Ascend* to iterate in-order over portions of
|
|
||||||
// the tree. When this function returns false, iteration will stop and the
|
|
||||||
// associated Ascend* function will immediately return.
|
|
||||||
type ItemIterator func(i Item) bool
|
|
||||||
|
|
||||||
// New creates a new B-Tree with the given degree.
|
|
||||||
//
|
|
||||||
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
|
|
||||||
// and 2-4 children).
|
|
||||||
func New(degree int) *BTree {
|
|
||||||
return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWithFreeList creates a new B-Tree that uses the given node free list.
|
|
||||||
func NewWithFreeList(degree int, f *FreeList) *BTree {
|
|
||||||
if degree <= 1 {
|
|
||||||
panic("bad degree")
|
|
||||||
}
|
|
||||||
return &BTree{
|
|
||||||
degree: degree,
|
|
||||||
cow: ©OnWriteContext{freelist: f},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// items stores items in a node.
|
|
||||||
type items []Item
|
|
||||||
|
|
||||||
// insertAt inserts a value into the given index, pushing all subsequent values
|
|
||||||
// forward.
|
|
||||||
func (s *items) insertAt(index int, item Item) {
|
|
||||||
*s = append(*s, nil)
|
|
||||||
if index < len(*s) {
|
|
||||||
copy((*s)[index+1:], (*s)[index:])
|
|
||||||
}
|
|
||||||
(*s)[index] = item
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeAt removes a value at a given index, pulling all subsequent values
|
|
||||||
// back.
|
|
||||||
func (s *items) removeAt(index int) Item {
|
|
||||||
item := (*s)[index]
|
|
||||||
copy((*s)[index:], (*s)[index+1:])
|
|
||||||
(*s)[len(*s)-1] = nil
|
|
||||||
*s = (*s)[:len(*s)-1]
|
|
||||||
return item
|
|
||||||
}
|
|
||||||
|
|
||||||
// pop removes and returns the last element in the list.
|
|
||||||
func (s *items) pop() (out Item) {
|
|
||||||
index := len(*s) - 1
|
|
||||||
out = (*s)[index]
|
|
||||||
(*s)[index] = nil
|
|
||||||
*s = (*s)[:index]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// truncate truncates this instance at index so that it contains only the
|
|
||||||
// first index items. index must be less than or equal to length.
|
|
||||||
func (s *items) truncate(index int) {
|
|
||||||
var toClear items
|
|
||||||
*s, toClear = (*s)[:index], (*s)[index:]
|
|
||||||
for len(toClear) > 0 {
|
|
||||||
toClear = toClear[copy(toClear, nilItems):]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// find returns the index where the given item should be inserted into this
|
|
||||||
// list. 'found' is true if the item already exists in the list at the given
|
|
||||||
// index.
|
|
||||||
func (s items) find(item Item) (index int, found bool) {
|
|
||||||
i := sort.Search(len(s), func(i int) bool {
|
|
||||||
return item.Less(s[i])
|
|
||||||
})
|
|
||||||
if i > 0 && !s[i-1].Less(item) {
|
|
||||||
return i - 1, true
|
|
||||||
}
|
|
||||||
return i, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// children stores child nodes in a node.
|
|
||||||
type children []*node
|
|
||||||
|
|
||||||
// insertAt inserts a value into the given index, pushing all subsequent values
|
|
||||||
// forward.
|
|
||||||
func (s *children) insertAt(index int, n *node) {
|
|
||||||
*s = append(*s, nil)
|
|
||||||
if index < len(*s) {
|
|
||||||
copy((*s)[index+1:], (*s)[index:])
|
|
||||||
}
|
|
||||||
(*s)[index] = n
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeAt removes a value at a given index, pulling all subsequent values
|
|
||||||
// back.
|
|
||||||
func (s *children) removeAt(index int) *node {
|
|
||||||
n := (*s)[index]
|
|
||||||
copy((*s)[index:], (*s)[index+1:])
|
|
||||||
(*s)[len(*s)-1] = nil
|
|
||||||
*s = (*s)[:len(*s)-1]
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// pop removes and returns the last element in the list.
|
|
||||||
func (s *children) pop() (out *node) {
|
|
||||||
index := len(*s) - 1
|
|
||||||
out = (*s)[index]
|
|
||||||
(*s)[index] = nil
|
|
||||||
*s = (*s)[:index]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// truncate truncates this instance at index so that it contains only the
|
|
||||||
// first index children. index must be less than or equal to length.
|
|
||||||
func (s *children) truncate(index int) {
|
|
||||||
var toClear children
|
|
||||||
*s, toClear = (*s)[:index], (*s)[index:]
|
|
||||||
for len(toClear) > 0 {
|
|
||||||
toClear = toClear[copy(toClear, nilChildren):]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// node is an internal node in a tree.
|
|
||||||
//
|
|
||||||
// It must at all times maintain the invariant that either
|
|
||||||
// * len(children) == 0, len(items) unconstrained
|
|
||||||
// * len(children) == len(items) + 1
|
|
||||||
type node struct {
|
|
||||||
items items
|
|
||||||
children children
|
|
||||||
cow *copyOnWriteContext
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *node) mutableFor(cow *copyOnWriteContext) *node {
|
|
||||||
if n.cow == cow {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
out := cow.newNode()
|
|
||||||
if cap(out.items) >= len(n.items) {
|
|
||||||
out.items = out.items[:len(n.items)]
|
|
||||||
} else {
|
|
||||||
out.items = make(items, len(n.items), cap(n.items))
|
|
||||||
}
|
|
||||||
copy(out.items, n.items)
|
|
||||||
// Copy children
|
|
||||||
if cap(out.children) >= len(n.children) {
|
|
||||||
out.children = out.children[:len(n.children)]
|
|
||||||
} else {
|
|
||||||
out.children = make(children, len(n.children), cap(n.children))
|
|
||||||
}
|
|
||||||
copy(out.children, n.children)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *node) mutableChild(i int) *node {
|
|
||||||
c := n.children[i].mutableFor(n.cow)
|
|
||||||
n.children[i] = c
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// split splits the given node at the given index. The current node shrinks,
|
|
||||||
// and this function returns the item that existed at that index and a new node
|
|
||||||
// containing all items/children after it.
|
|
||||||
func (n *node) split(i int) (Item, *node) {
|
|
||||||
item := n.items[i]
|
|
||||||
next := n.cow.newNode()
|
|
||||||
next.items = append(next.items, n.items[i+1:]...)
|
|
||||||
n.items.truncate(i)
|
|
||||||
if len(n.children) > 0 {
|
|
||||||
next.children = append(next.children, n.children[i+1:]...)
|
|
||||||
n.children.truncate(i + 1)
|
|
||||||
}
|
|
||||||
return item, next
|
|
||||||
}
|
|
||||||
|
|
||||||
// maybeSplitChild checks if a child should be split, and if so splits it.
|
|
||||||
// Returns whether or not a split occurred.
|
|
||||||
func (n *node) maybeSplitChild(i, maxItems int) bool {
|
|
||||||
if len(n.children[i].items) < maxItems {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
first := n.mutableChild(i)
|
|
||||||
item, second := first.split(maxItems / 2)
|
|
||||||
n.items.insertAt(i, item)
|
|
||||||
n.children.insertAt(i+1, second)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// insert inserts an item into the subtree rooted at this node, making sure
|
|
||||||
// no nodes in the subtree exceed maxItems items. Should an equivalent item be
|
|
||||||
// be found/replaced by insert, it will be returned.
|
|
||||||
func (n *node) insert(item Item, maxItems int) Item {
|
|
||||||
i, found := n.items.find(item)
|
|
||||||
if found {
|
|
||||||
out := n.items[i]
|
|
||||||
n.items[i] = item
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
if len(n.children) == 0 {
|
|
||||||
n.items.insertAt(i, item)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if n.maybeSplitChild(i, maxItems) {
|
|
||||||
inTree := n.items[i]
|
|
||||||
switch {
|
|
||||||
case item.Less(inTree):
|
|
||||||
// no change, we want first split node
|
|
||||||
case inTree.Less(item):
|
|
||||||
i++ // we want second split node
|
|
||||||
default:
|
|
||||||
out := n.items[i]
|
|
||||||
n.items[i] = item
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n.mutableChild(i).insert(item, maxItems)
|
|
||||||
}
|
|
||||||
|
|
||||||
// get finds the given key in the subtree and returns it.
|
|
||||||
func (n *node) get(key Item) Item {
|
|
||||||
i, found := n.items.find(key)
|
|
||||||
if found {
|
|
||||||
return n.items[i]
|
|
||||||
} else if len(n.children) > 0 {
|
|
||||||
return n.children[i].get(key)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// min returns the first item in the subtree.
|
|
||||||
func min(n *node) Item {
|
|
||||||
if n == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
for len(n.children) > 0 {
|
|
||||||
n = n.children[0]
|
|
||||||
}
|
|
||||||
if len(n.items) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return n.items[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// max returns the last item in the subtree.
|
|
||||||
func max(n *node) Item {
|
|
||||||
if n == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
for len(n.children) > 0 {
|
|
||||||
n = n.children[len(n.children)-1]
|
|
||||||
}
|
|
||||||
if len(n.items) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return n.items[len(n.items)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
// toRemove details what item to remove in a node.remove call.
type toRemove int

// The three removal modes accepted by node.remove / BTree.deleteItem.
const (
	removeItem toRemove = iota // removes the given item
	removeMin                  // removes smallest item in the subtree
	removeMax                  // removes largest item in the subtree
)
|
|
||||||
|
|
||||||
// remove removes an item from the subtree rooted at this node.
//
// typ selects the removal mode (exact item, subtree minimum, or subtree
// maximum); minItems is the per-node lower bound the subtree must keep
// after removal. Returns the removed item, or nil when typ is removeItem
// and no equivalent item exists.
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
	var i int
	var found bool
	switch typ {
	case removeMax:
		if len(n.children) == 0 {
			// Leaf: the maximum is simply the last item.
			return n.items.pop()
		}
		i = len(n.items) // descend into the rightmost child
	case removeMin:
		if len(n.children) == 0 {
			// Leaf: the minimum is simply the first item.
			return n.items.removeAt(0)
		}
		i = 0 // descend into the leftmost child
	case removeItem:
		i, found = n.items.find(item)
		if len(n.children) == 0 {
			if found {
				return n.items.removeAt(i)
			}
			return nil
		}
	default:
		panic("invalid type")
	}
	// If we get to here, we have children.
	if len(n.children[i].items) <= minItems {
		// Child i can't spare an item; rebalance first, then retry the
		// whole removal from this node.
		return n.growChildAndRemove(i, item, minItems, typ)
	}
	child := n.mutableChild(i)
	// Either we had enough items to begin with, or we've done some
	// merging/stealing, because we've got enough now and we're ready to return
	// stuff.
	if found {
		// The item exists at index 'i', and the child we've selected can give us a
		// predecessor, since if we've gotten here it's got > minItems items in it.
		out := n.items[i]
		// We use our special-case 'remove' call with typ=maxItem to pull the
		// predecessor of item i (the rightmost leaf of our immediate left child)
		// and set it into where we pulled the item from.
		n.items[i] = child.remove(nil, minItems, removeMax)
		return out
	}
	// Final recursive call. Once we're here, we know that the item isn't in this
	// node and that the child is big enough to remove from.
	return child.remove(item, minItems, typ)
}
|
|
||||||
|
|
||||||
// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
//   1) item is in this node
//   2) item is in child
// In both cases, we need to handle the two subcases:
//   A) node has enough values that it can spare one
//   B) node doesn't have enough values
// For the latter, we have to check:
//   a) left sibling has node to spare
//   b) right sibling has node to spare
//   c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
	if i > 0 && len(n.children[i-1].items) > minItems {
		// Steal from left child
		child := n.mutableChild(i)
		stealFrom := n.mutableChild(i - 1)
		stolenItem := stealFrom.items.pop()
		// Rotate: separator item moves down into child, stolen item
		// moves up to become the new separator.
		child.items.insertAt(0, n.items[i-1])
		n.items[i-1] = stolenItem
		if len(stealFrom.children) > 0 {
			// The stolen item's right subtree comes with it.
			child.children.insertAt(0, stealFrom.children.pop())
		}
	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
		// steal from right child
		child := n.mutableChild(i)
		stealFrom := n.mutableChild(i + 1)
		stolenItem := stealFrom.items.removeAt(0)
		// Mirror-image rotation of the left-steal case above.
		child.items = append(child.items, n.items[i])
		n.items[i] = stolenItem
		if len(stealFrom.children) > 0 {
			child.children = append(child.children, stealFrom.children.removeAt(0))
		}
	} else {
		if i >= len(n.items) {
			// No right sibling exists; merge with the left one instead
			// by shifting i so that "merge with right child" below
			// operates on the last item/child pair.
			i--
		}
		child := n.mutableChild(i)
		// merge with right child
		mergeItem := n.items.removeAt(i)
		mergeChild := n.children.removeAt(i + 1)
		child.items = append(child.items, mergeItem)
		child.items = append(child.items, mergeChild.items...)
		child.children = append(child.children, mergeChild.children...)
		n.cow.freeNode(mergeChild)
	}
	// Child i now has more than minItems items; redo the removal.
	return n.remove(item, minItems, typ)
}
|
|
||||||
|
|
||||||
// direction selects the traversal order used by node.iterate.
type direction int

const (
	descend = direction(-1) // iterate from largest to smallest
	ascend  = direction(+1) // iterate from smallest to largest
)
|
|
||||||
|
|
||||||
// iterate provides a simple method for iterating over elements in the tree.
//
// When ascending, the 'start' should be less than 'stop' and when descending,
// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
// will force the iterator to include the first item when it equals 'start',
// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a
// "greaterThan" or "lessThan" queries.
//
// The returned pair is (hit, keepGoing): 'hit' records whether an item equal
// to start has been encountered yet (threaded through recursive calls), and
// a false second value tells the caller to stop iterating entirely.
func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
	var ok, found bool
	var index int
	switch dir {
	case ascend:
		if start != nil {
			// Begin at the first item >= start.
			index, _ = n.items.find(start)
		}
		for i := index; i < len(n.items); i++ {
			if len(n.children) > 0 {
				// Visit the subtree left of items[i] first.
				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
					return hit, false
				}
			}
			if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
				// items[i] == start and the caller excluded it: mark
				// the boundary as seen, but don't yield it.
				hit = true
				continue
			}
			hit = true
			if stop != nil && !n.items[i].Less(stop) {
				// Reached or passed stop (exclusive bound): done.
				return hit, false
			}
			if !iter(n.items[i]) {
				// Consumer asked us to stop.
				return hit, false
			}
		}
		if len(n.children) > 0 {
			// Finally visit the rightmost subtree.
			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
				return hit, false
			}
		}
	case descend:
		if start != nil {
			index, found = n.items.find(start)
			if !found {
				// find returned the insertion point; the last item
				// < start is one slot to the left.
				index = index - 1
			}
		} else {
			index = len(n.items) - 1
		}
		for i := index; i >= 0; i-- {
			if start != nil && !n.items[i].Less(start) {
				// items[i] >= start: skip it unless it equals start
				// and the caller asked for the boundary exactly once.
				if !includeStart || hit || start.Less(n.items[i]) {
					continue
				}
			}
			if len(n.children) > 0 {
				// Visit the subtree right of items[i] first.
				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
					return hit, false
				}
			}
			if stop != nil && !stop.Less(n.items[i]) {
				// Reached or passed stop (exclusive bound): done.
				return hit, false // continue
			}
			hit = true
			if !iter(n.items[i]) {
				return hit, false
			}
		}
		if len(n.children) > 0 {
			// Finally visit the leftmost subtree.
			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
				return hit, false
			}
		}
	}
	return hit, true
}
|
|
||||||
|
|
||||||
// print writes an indented textual dump of the subtree rooted at n to w,
// one node per line. Used for testing/debugging purposes.
func (n *node) print(w io.Writer, level int) {
	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
	for _, c := range n.children {
		c.print(w, level+1)
	}
}
|
|
||||||
|
|
||||||
// BTree is an implementation of a B-Tree.
//
// BTree stores Item instances in an ordered structure, allowing easy insertion,
// removal, and iteration.
//
// Write operations are not safe for concurrent mutation by multiple
// goroutines, but Read operations are.
type BTree struct {
	degree int                 // minimum degree; bounds items per node via maxItems/minItems
	length int                 // total number of items currently stored in the tree
	root   *node               // root of the tree; nil when the tree is empty
	cow    *copyOnWriteContext // ownership context used by clone/copy-on-write logic
}
|
|
||||||
|
|
||||||
// copyOnWriteContext pointers determine node ownership... a tree with a write
// context equivalent to a node's write context is allowed to modify that node.
// A tree whose write context does not match a node's is not allowed to modify
// it, and must create a new, writable copy (IE: it's a Clone).
//
// When doing any write operation, we maintain the invariant that the current
// node's context is equal to the context of the tree that requested the write.
// We do this by, before we descend into any node, creating a copy with the
// correct context if the contexts don't match.
//
// Since the node we're currently visiting on any write has the requesting
// tree's context, that node is modifiable in place. Children of that node may
// not share context, but before we descend into them, we'll make a mutable
// copy.
type copyOnWriteContext struct {
	freelist *FreeList // recycled nodes, shared by all trees using this context
}
|
|
||||||
|
|
||||||
// Clone clones the btree, lazily. Clone should not be called concurrently,
|
|
||||||
// but the original tree (t) and the new tree (t2) can be used concurrently
|
|
||||||
// once the Clone call completes.
|
|
||||||
//
|
|
||||||
// The internal tree structure of b is marked read-only and shared between t and
|
|
||||||
// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
|
|
||||||
// whenever one of b's original nodes would have been modified. Read operations
|
|
||||||
// should have no performance degredation. Write operations for both t and t2
|
|
||||||
// will initially experience minor slow-downs caused by additional allocs and
|
|
||||||
// copies due to the aforementioned copy-on-write logic, but should converge to
|
|
||||||
// the original performance characteristics of the original tree.
|
|
||||||
func (t *BTree) Clone() (t2 *BTree) {
|
|
||||||
// Create two entirely new copy-on-write contexts.
|
|
||||||
// This operation effectively creates three trees:
|
|
||||||
// the original, shared nodes (old b.cow)
|
|
||||||
// the new b.cow nodes
|
|
||||||
// the new out.cow nodes
|
|
||||||
cow1, cow2 := *t.cow, *t.cow
|
|
||||||
out := *t
|
|
||||||
t.cow = &cow1
|
|
||||||
out.cow = &cow2
|
|
||||||
return &out
|
|
||||||
}
|
|
||||||
|
|
||||||
// maxItems returns the max number of items to allow per node.
|
|
||||||
func (t *BTree) maxItems() int {
|
|
||||||
return t.degree*2 - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// minItems returns the min number of items to allow per node (ignored for the
|
|
||||||
// root node).
|
|
||||||
func (t *BTree) minItems() int {
|
|
||||||
return t.degree - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *copyOnWriteContext) newNode() (n *node) {
|
|
||||||
n = c.freelist.newNode()
|
|
||||||
n.cow = c
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// freeType describes the outcome of a copyOnWriteContext.freeNode call.
type freeType int

const (
	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
	ftStored                       // node was stored in the freelist for later use
	ftNotOwned                     // node was ignored by COW, since it's owned by another one
)
|
|
||||||
|
|
||||||
// freeNode frees a node within a given COW context, if it's owned by that
|
|
||||||
// context. It returns what happened to the node (see freeType const
|
|
||||||
// documentation).
|
|
||||||
func (c *copyOnWriteContext) freeNode(n *node) freeType {
|
|
||||||
if n.cow == c {
|
|
||||||
// clear to allow GC
|
|
||||||
n.items.truncate(0)
|
|
||||||
n.children.truncate(0)
|
|
||||||
n.cow = nil
|
|
||||||
if c.freelist.freeNode(n) {
|
|
||||||
return ftStored
|
|
||||||
} else {
|
|
||||||
return ftFreelistFull
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return ftNotOwned
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReplaceOrInsert adds the given item to the tree. If an item in the tree
// already equals the given one, it is removed from the tree and returned.
// Otherwise, nil is returned.
//
// nil cannot be added to the tree (will panic).
func (t *BTree) ReplaceOrInsert(item Item) Item {
	if item == nil {
		panic("nil item being added to BTree")
	}
	if t.root == nil {
		// Empty tree: create a root holding just this item.
		t.root = t.cow.newNode()
		t.root.items = append(t.root.items, item)
		t.length++
		return nil
	} else {
		// Ensure the root is owned by this tree before mutating.
		t.root = t.root.mutableFor(t.cow)
		if len(t.root.items) >= t.maxItems() {
			// Root is full: split it and grow the tree by one level,
			// with the promoted median as the new root's only item.
			item2, second := t.root.split(t.maxItems() / 2)
			oldroot := t.root
			t.root = t.cow.newNode()
			t.root.items = append(t.root.items, item2)
			t.root.children = append(t.root.children, oldroot, second)
		}
	}
	out := t.root.insert(item, t.maxItems())
	if out == nil {
		// Nothing was replaced, so the tree grew by one item.
		t.length++
	}
	return out
}
|
|
||||||
|
|
||||||
// Delete removes an item equal to the passed in item from the tree, returning
|
|
||||||
// it. If no such item exists, returns nil.
|
|
||||||
func (t *BTree) Delete(item Item) Item {
|
|
||||||
return t.deleteItem(item, removeItem)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteMin removes the smallest item in the tree and returns it.
|
|
||||||
// If no such item exists, returns nil.
|
|
||||||
func (t *BTree) DeleteMin() Item {
|
|
||||||
return t.deleteItem(nil, removeMin)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteMax removes the largest item in the tree and returns it.
|
|
||||||
// If no such item exists, returns nil.
|
|
||||||
func (t *BTree) DeleteMax() Item {
|
|
||||||
return t.deleteItem(nil, removeMax)
|
|
||||||
}
|
|
||||||
|
|
||||||
// deleteItem is the shared implementation behind Delete, DeleteMin, and
// DeleteMax. It removes per typ, shrinks the tree height when the root is
// emptied, and keeps t.length in sync. Returns the removed item or nil.
func (t *BTree) deleteItem(item Item, typ toRemove) Item {
	if t.root == nil || len(t.root.items) == 0 {
		// Empty tree: nothing to remove.
		return nil
	}
	// Ensure the root is owned by this tree before mutating.
	t.root = t.root.mutableFor(t.cow)
	out := t.root.remove(item, t.minItems(), typ)
	if len(t.root.items) == 0 && len(t.root.children) > 0 {
		// Root drained of items but still has a child: collapse one
		// level of height and recycle the old root node.
		oldroot := t.root
		t.root = t.root.children[0]
		t.cow.freeNode(oldroot)
	}
	if out != nil {
		t.length--
	}
	return out
}
|
|
||||||
|
|
||||||
// AscendRange calls the iterator for every value in the tree within the range
|
|
||||||
// [greaterOrEqual, lessThan), until iterator returns false.
|
|
||||||
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
|
|
||||||
if t.root == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AscendLessThan calls the iterator for every value in the tree within the range
|
|
||||||
// [first, pivot), until iterator returns false.
|
|
||||||
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
|
|
||||||
if t.root == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.root.iterate(ascend, nil, pivot, false, false, iterator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AscendGreaterOrEqual calls the iterator for every value in the tree within
|
|
||||||
// the range [pivot, last], until iterator returns false.
|
|
||||||
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
|
|
||||||
if t.root == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.root.iterate(ascend, pivot, nil, true, false, iterator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ascend calls the iterator for every value in the tree within the range
|
|
||||||
// [first, last], until iterator returns false.
|
|
||||||
func (t *BTree) Ascend(iterator ItemIterator) {
|
|
||||||
if t.root == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.root.iterate(ascend, nil, nil, false, false, iterator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DescendRange calls the iterator for every value in the tree within the range
|
|
||||||
// [lessOrEqual, greaterThan), until iterator returns false.
|
|
||||||
func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
|
|
||||||
if t.root == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DescendLessOrEqual calls the iterator for every value in the tree within the range
|
|
||||||
// [pivot, first], until iterator returns false.
|
|
||||||
func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
|
|
||||||
if t.root == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.root.iterate(descend, pivot, nil, true, false, iterator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DescendGreaterThan calls the iterator for every value in the tree within
|
|
||||||
// the range (pivot, last], until iterator returns false.
|
|
||||||
func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
|
|
||||||
if t.root == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.root.iterate(descend, nil, pivot, false, false, iterator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Descend calls the iterator for every value in the tree within the range
|
|
||||||
// [last, first], until iterator returns false.
|
|
||||||
func (t *BTree) Descend(iterator ItemIterator) {
|
|
||||||
if t.root == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.root.iterate(descend, nil, nil, false, false, iterator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get looks for the key item in the tree, returning it. It returns nil if
|
|
||||||
// unable to find that item.
|
|
||||||
func (t *BTree) Get(key Item) Item {
|
|
||||||
if t.root == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return t.root.get(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Min returns the smallest item in the tree, or nil if the tree is empty.
|
|
||||||
func (t *BTree) Min() Item {
|
|
||||||
return min(t.root)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Max returns the largest item in the tree, or nil if the tree is empty.
|
|
||||||
func (t *BTree) Max() Item {
|
|
||||||
return max(t.root)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Has returns true if the given key is in the tree.
|
|
||||||
func (t *BTree) Has(key Item) bool {
|
|
||||||
return t.Get(key) != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of items currently in the tree.
|
|
||||||
func (t *BTree) Len() int {
|
|
||||||
return t.length
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear removes all items from the btree. If addNodesToFreelist is true,
// t's nodes are added to its freelist as part of this call, until the freelist
// is full. Otherwise, the root node is simply dereferenced and the subtree
// left to Go's normal GC processes.
//
// This can be much faster
// than calling Delete on all elements, because that requires finding/removing
// each element in the tree and updating the tree accordingly. It also is
// somewhat faster than creating a new tree to replace the old one, because
// nodes from the old tree are reclaimed into the freelist for use by the new
// one, instead of being lost to the garbage collector.
//
// This call takes:
//   O(1): when addNodesToFreelist is false, this is a single operation.
//   O(1): when the freelist is already full, it breaks out immediately
//   O(freelist size): when the freelist is empty and the nodes are all owned
//       by this tree, nodes are added to the freelist until full.
//   O(tree size): when all nodes are owned by another tree, all nodes are
//       iterated over looking for nodes to add to the freelist, and due to
//       ownership, none are.
func (t *BTree) Clear(addNodesToFreelist bool) {
	if t.root != nil && addNodesToFreelist {
		// Walk the subtree returning nodes to the freelist (stops early
		// once the freelist fills up).
		t.root.reset(t.cow)
	}
	t.root, t.length = nil, 0
}
|
|
||||||
|
|
||||||
// reset returns a subtree to the freelist. It breaks out immediately if the
|
|
||||||
// freelist is full, since the only benefit of iterating is to fill that
|
|
||||||
// freelist up. Returns true if parent reset call should continue.
|
|
||||||
func (n *node) reset(c *copyOnWriteContext) bool {
|
|
||||||
for _, child := range n.children {
|
|
||||||
if !child.reset(c) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return c.freeNode(n) != ftFreelistFull
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int implements the Item interface for integers.
type Int int

// Less returns true if int(a) < int(b).
// It panics if b is not an Int, matching the Item contract's expectation
// that only comparable item types are mixed within one tree.
func (a Int) Less(b Item) bool {
	return a < b.(Int)
}
|
|
||||||
18
vendor/github.com/gregjones/httpcache/.travis.yml
generated
vendored
18
vendor/github.com/gregjones/httpcache/.travis.yml
generated
vendored
@@ -1,18 +0,0 @@
|
|||||||
sudo: false
|
|
||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.6.x
|
|
||||||
- 1.7.x
|
|
||||||
- 1.8.x
|
|
||||||
- master
|
|
||||||
matrix:
|
|
||||||
allow_failures:
|
|
||||||
- go: master
|
|
||||||
fast_finish: true
|
|
||||||
install:
|
|
||||||
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
|
||||||
script:
|
|
||||||
- go get -t -v ./...
|
|
||||||
- diff -u <(echo -n) <(gofmt -d .)
|
|
||||||
- go tool vet .
|
|
||||||
- go test -v -race ./...
|
|
||||||
7
vendor/github.com/gregjones/httpcache/LICENSE.txt
generated
vendored
7
vendor/github.com/gregjones/httpcache/LICENSE.txt
generated
vendored
@@ -1,7 +0,0 @@
|
|||||||
Copyright © 2012 Greg Jones (greg.jones@gmail.com)
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
24
vendor/github.com/gregjones/httpcache/README.md
generated
vendored
24
vendor/github.com/gregjones/httpcache/README.md
generated
vendored
@@ -1,24 +0,0 @@
|
|||||||
httpcache
|
|
||||||
=========
|
|
||||||
|
|
||||||
[](https://travis-ci.org/gregjones/httpcache) [](https://godoc.org/github.com/gregjones/httpcache)
|
|
||||||
|
|
||||||
Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses.
|
|
||||||
|
|
||||||
It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
|
|
||||||
|
|
||||||
Cache Backends
|
|
||||||
--------------
|
|
||||||
|
|
||||||
- The built-in 'memory' cache stores responses in an in-memory map.
|
|
||||||
- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
|
|
||||||
- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
|
|
||||||
- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
|
|
||||||
- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
|
|
||||||
- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
|
|
||||||
- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
|
|
||||||
|
|
||||||
License
|
|
||||||
-------
|
|
||||||
|
|
||||||
- [MIT License](LICENSE.txt)
|
|
||||||
61
vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
generated
vendored
61
vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
generated
vendored
@@ -1,61 +0,0 @@
|
|||||||
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
|
|
||||||
// to supplement an in-memory map with persistent storage
|
|
||||||
//
|
|
||||||
package diskcache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/md5"
|
|
||||||
"encoding/hex"
|
|
||||||
"github.com/peterbourgon/diskv"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
|
|
||||||
type Cache struct {
|
|
||||||
d *diskv.Diskv
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns the response corresponding to key if present
|
|
||||||
func (c *Cache) Get(key string) (resp []byte, ok bool) {
|
|
||||||
key = keyToFilename(key)
|
|
||||||
resp, err := c.d.Read(key)
|
|
||||||
if err != nil {
|
|
||||||
return []byte{}, false
|
|
||||||
}
|
|
||||||
return resp, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set saves a response to the cache as key
|
|
||||||
func (c *Cache) Set(key string, resp []byte) {
|
|
||||||
key = keyToFilename(key)
|
|
||||||
c.d.WriteStream(key, bytes.NewReader(resp), true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes the response with key from the cache
|
|
||||||
func (c *Cache) Delete(key string) {
|
|
||||||
key = keyToFilename(key)
|
|
||||||
c.d.Erase(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func keyToFilename(key string) string {
|
|
||||||
h := md5.New()
|
|
||||||
io.WriteString(h, key)
|
|
||||||
return hex.EncodeToString(h.Sum(nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
// New returns a new Cache that will store files in basePath
|
|
||||||
func New(basePath string) *Cache {
|
|
||||||
return &Cache{
|
|
||||||
d: diskv.New(diskv.Options{
|
|
||||||
BasePath: basePath,
|
|
||||||
CacheSizeMax: 100 * 1024 * 1024, // 100MB
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWithDiskv returns a new Cache using the provided Diskv as underlying
|
|
||||||
// storage.
|
|
||||||
func NewWithDiskv(d *diskv.Diskv) *Cache {
|
|
||||||
return &Cache{d}
|
|
||||||
}
|
|
||||||
553
vendor/github.com/gregjones/httpcache/httpcache.go
generated
vendored
553
vendor/github.com/gregjones/httpcache/httpcache.go
generated
vendored
@@ -1,553 +0,0 @@
|
|||||||
// Package httpcache provides a http.RoundTripper implementation that works as a
|
|
||||||
// mostly RFC-compliant cache for http responses.
|
|
||||||
//
|
|
||||||
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
|
|
||||||
// and not for a shared proxy).
|
|
||||||
//
|
|
||||||
package httpcache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httputil"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
stale = iota
|
|
||||||
fresh
|
|
||||||
transparent
|
|
||||||
// XFromCache is the header added to responses that are returned from the cache
|
|
||||||
XFromCache = "X-From-Cache"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Cache interface is used by the Transport to store and retrieve responses.
|
|
||||||
type Cache interface {
|
|
||||||
// Get returns the []byte representation of a cached response and a bool
|
|
||||||
// set to true if the value isn't empty
|
|
||||||
Get(key string) (responseBytes []byte, ok bool)
|
|
||||||
// Set stores the []byte representation of a response against a key
|
|
||||||
Set(key string, responseBytes []byte)
|
|
||||||
// Delete removes the value associated with the key
|
|
||||||
Delete(key string)
|
|
||||||
}
|
|
||||||
|
|
||||||
// cacheKey returns the cache key for req.
|
|
||||||
func cacheKey(req *http.Request) string {
|
|
||||||
return req.URL.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// CachedResponse returns the cached http.Response for req if present, and nil
|
|
||||||
// otherwise.
|
|
||||||
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
|
|
||||||
cachedVal, ok := c.Get(cacheKey(req))
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
b := bytes.NewBuffer(cachedVal)
|
|
||||||
return http.ReadResponse(bufio.NewReader(b), req)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
type MemoryCache struct {
	mu    sync.RWMutex      // guards items
	items map[string][]byte // cached responses keyed by cacheKey
}

// Get returns the []byte representation of the response and true if present, false if not
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	resp, ok = c.items[key]
	return resp, ok
}

// Set saves response resp to the cache with key
func (c *MemoryCache) Set(key string, resp []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[key] = resp
}

// Delete removes key from the cache
func (c *MemoryCache) Delete(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.items, key)
}

// NewMemoryCache returns a new Cache that will store items in an in-memory map
func NewMemoryCache() *MemoryCache {
	return &MemoryCache{items: make(map[string][]byte)}
}
|
|
||||||
|
|
||||||
// Transport is an implementation of http.RoundTripper that will return values from a cache
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
// to repeated requests allowing servers to return 304 / Not Modified
type Transport struct {
	// The RoundTripper interface actually used to make requests
	// If nil, http.DefaultTransport is used
	Transport http.RoundTripper
	// Cache stores serialized responses; keys are derived from the request URL.
	Cache Cache
	// If true, responses returned from the cache will be given an extra header, X-From-Cache
	MarkCachedResponses bool
}
|
|
||||||
|
|
||||||
// NewTransport returns a new Transport with the
|
|
||||||
// provided Cache implementation and MarkCachedResponses set to true
|
|
||||||
func NewTransport(c Cache) *Transport {
|
|
||||||
return &Transport{Cache: c, MarkCachedResponses: true}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Client returns an *http.Client that caches responses.
|
|
||||||
func (t *Transport) Client() *http.Client {
|
|
||||||
return &http.Client{Transport: t}
|
|
||||||
}
|
|
||||||
|
|
||||||
// varyMatches will return false unless all of the cached values for the headers listed in Vary
|
|
||||||
// match the new request
|
|
||||||
func varyMatches(cachedResp *http.Response, req *http.Request) bool {
|
|
||||||
for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
|
|
||||||
header = http.CanonicalHeaderKey(header)
|
|
||||||
if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// RoundTrip takes a Request and returns a Response
//
// If there is a fresh Response already in cache, then it will be returned without connecting to
// the server.
//
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	cacheKey := cacheKey(req)
	// Only safe, un-ranged GET/HEAD requests are candidates for caching.
	cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
	var cachedResp *http.Response
	if cacheable {
		cachedResp, err = CachedResponse(t.Cache, req)
	} else {
		// Need to invalidate an existing value
		t.Cache.Delete(cacheKey)
	}

	transport := t.Transport
	if transport == nil {
		transport = http.DefaultTransport
	}

	if cacheable && cachedResp != nil && err == nil {
		if t.MarkCachedResponses {
			cachedResp.Header.Set(XFromCache, "1")
		}

		if varyMatches(cachedResp, req) {
			// Can only use cached value if the new request doesn't Vary significantly
			freshness := getFreshness(cachedResp.Header, req.Header)
			if freshness == fresh {
				return cachedResp, nil
			}

			if freshness == stale {
				var req2 *http.Request
				// Add validators if caller hasn't already done so
				etag := cachedResp.Header.Get("etag")
				if etag != "" && req.Header.Get("etag") == "" {
					req2 = cloneRequest(req)
					req2.Header.Set("if-none-match", etag)
				}
				lastModified := cachedResp.Header.Get("last-modified")
				if lastModified != "" && req.Header.Get("last-modified") == "" {
					if req2 == nil {
						req2 = cloneRequest(req)
					}
					req2.Header.Set("if-modified-since", lastModified)
				}
				if req2 != nil {
					req = req2
				}
			}
		}

		resp, err = transport.RoundTrip(req)
		if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
			// Replace the 304 response with the one from cache, but update with some new headers
			endToEndHeaders := getEndToEndHeaders(resp.Header)
			for _, header := range endToEndHeaders {
				cachedResp.Header[header] = resp.Header[header]
			}
			// Rewrite the status so callers see 200 OK rather than 304.
			cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
			cachedResp.StatusCode = http.StatusOK

			resp = cachedResp
		} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
			req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
			// In case of transport failure and stale-if-error activated, returns cached content
			// when available
			cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
			cachedResp.StatusCode = http.StatusOK
			return cachedResp, nil
		} else {
			if err != nil || resp.StatusCode != http.StatusOK {
				// Stale entry is no longer useful; drop it.
				t.Cache.Delete(cacheKey)
			}
			if err != nil {
				return nil, err
			}
		}
	} else {
		reqCacheControl := parseCacheControl(req.Header)
		if _, ok := reqCacheControl["only-if-cached"]; ok {
			// RFC 7234: only-if-cached with no cached entry yields 504.
			resp = newGatewayTimeoutResponse(req)
		} else {
			resp, err = transport.RoundTrip(req)
			if err != nil {
				return nil, err
			}
		}
	}

	if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
		// Record the request values of any Vary headers so future requests
		// can be matched against them (see varyMatches).
		for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
			varyKey = http.CanonicalHeaderKey(varyKey)
			fakeHeader := "X-Varied-" + varyKey
			reqValue := req.Header.Get(varyKey)
			if reqValue != "" {
				resp.Header.Set(fakeHeader, reqValue)
			}
		}
		switch req.Method {
		case "GET":
			// Delay caching until EOF is reached.
			resp.Body = &cachingReadCloser{
				R: resp.Body,
				OnEOF: func(r io.Reader) {
					// Shallow-copy the response so the caller's body is untouched.
					resp := *resp
					resp.Body = ioutil.NopCloser(r)
					respBytes, err := httputil.DumpResponse(&resp, true)
					if err == nil {
						t.Cache.Set(cacheKey, respBytes)
					}
				},
			}
		default:
			respBytes, err := httputil.DumpResponse(resp, true)
			if err == nil {
				t.Cache.Set(cacheKey, respBytes)
			}
		}
	} else {
		t.Cache.Delete(cacheKey)
	}
	return resp, nil
}
|
|
||||||
|
|
||||||
// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
|
|
||||||
var ErrNoDateHeader = errors.New("no Date header")
|
|
||||||
|
|
||||||
// Date parses and returns the value of the Date header.
|
|
||||||
func Date(respHeaders http.Header) (date time.Time, err error) {
|
|
||||||
dateHeader := respHeaders.Get("date")
|
|
||||||
if dateHeader == "" {
|
|
||||||
err = ErrNoDateHeader
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
return time.Parse(time.RFC1123, dateHeader)
|
|
||||||
}
|
|
||||||
|
|
||||||
// realClock implements timer using the real system clock.
type realClock struct{}

// since reports the wall-clock time elapsed since d.
func (c *realClock) since(d time.Time) time.Duration {
	return time.Since(d)
}

// timer abstracts elapsed-time measurement so a fake clock can be
// substituted for the real one (e.g. in tests).
type timer interface {
	since(d time.Time) time.Duration
}

// clock is the package-wide time source used for freshness calculations.
var clock timer = &realClock{}
|
|
||||||
|
|
||||||
// getFreshness will return one of fresh/stale/transparent based on the cache-control
// values of the request and the response
//
// fresh indicates the response can be returned
// stale indicates that the response needs validating before it is returned
// transparent indicates the response should not be used to fulfil the request
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// signficant. Similarly, smax-age isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
	respCacheControl := parseCacheControl(respHeaders)
	reqCacheControl := parseCacheControl(reqHeaders)
	// Request no-cache forbids serving from cache entirely.
	if _, ok := reqCacheControl["no-cache"]; ok {
		return transparent
	}
	// Response no-cache means revalidate before use.
	if _, ok := respCacheControl["no-cache"]; ok {
		return stale
	}
	if _, ok := reqCacheControl["only-if-cached"]; ok {
		return fresh
	}

	date, err := Date(respHeaders)
	if err != nil {
		// Without a Date header age cannot be computed; treat as stale.
		return stale
	}
	currentAge := clock.since(date)

	var lifetime time.Duration
	var zeroDuration time.Duration

	// If a response includes both an Expires header and a max-age directive,
	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
	if maxAge, ok := respCacheControl["max-age"]; ok {
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			lifetime = zeroDuration
		}
	} else {
		expiresHeader := respHeaders.Get("Expires")
		if expiresHeader != "" {
			expires, err := time.Parse(time.RFC1123, expiresHeader)
			if err != nil {
				lifetime = zeroDuration
			} else {
				lifetime = expires.Sub(date)
			}
		}
	}

	if maxAge, ok := reqCacheControl["max-age"]; ok {
		// the client is willing to accept a response whose age is no greater than the specified time in seconds
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			lifetime = zeroDuration
		}
	}
	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
		// the client wants a response that will still be fresh for at least the specified number of seconds.
		minfreshDuration, err := time.ParseDuration(minfresh + "s")
		if err == nil {
			currentAge = time.Duration(currentAge + minfreshDuration)
		}
	}

	if maxstale, ok := reqCacheControl["max-stale"]; ok {
		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
		// its expiration time by no more than the specified number of seconds.
		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
		//
		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
		// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
		// return-value available here.
		if maxstale == "" {
			return fresh
		}
		maxstaleDuration, err := time.ParseDuration(maxstale + "s")
		if err == nil {
			currentAge = time.Duration(currentAge - maxstaleDuration)
		}
	}

	if lifetime > currentAge {
		return fresh
	}

	return stale
}
|
|
||||||
|
|
||||||
// canStaleOnError returns true if either the request or the response includes
// the stale-if-error cache control extension: https://tools.ietf.org/html/rfc5861
// When the directive carries a value, it is honored only while the cached
// response is within that many seconds of its Date header.
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
	respCacheControl := parseCacheControl(respHeaders)
	reqCacheControl := parseCacheControl(reqHeaders)

	var err error
	// lifetime < 0 means "no bounded stale-if-error directive seen".
	lifetime := time.Duration(-1)

	if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
		if staleMaxAge != "" {
			lifetime, err = time.ParseDuration(staleMaxAge + "s")
			if err != nil {
				return false
			}
		} else {
			// Valueless directive: stale responses of any age are acceptable.
			return true
		}
	}
	if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
		if staleMaxAge != "" {
			// A request directive overrides the response's lifetime.
			lifetime, err = time.ParseDuration(staleMaxAge + "s")
			if err != nil {
				return false
			}
		} else {
			return true
		}
	}

	if lifetime >= 0 {
		date, err := Date(respHeaders)
		if err != nil {
			return false
		}
		currentAge := clock.since(date)
		if lifetime > currentAge {
			return true
		}
	}

	return false
}
|
|
||||||
|
|
||||||
func getEndToEndHeaders(respHeaders http.Header) []string {
|
|
||||||
// These headers are always hop-by-hop
|
|
||||||
hopByHopHeaders := map[string]struct{}{
|
|
||||||
"Connection": struct{}{},
|
|
||||||
"Keep-Alive": struct{}{},
|
|
||||||
"Proxy-Authenticate": struct{}{},
|
|
||||||
"Proxy-Authorization": struct{}{},
|
|
||||||
"Te": struct{}{},
|
|
||||||
"Trailers": struct{}{},
|
|
||||||
"Transfer-Encoding": struct{}{},
|
|
||||||
"Upgrade": struct{}{},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
|
|
||||||
// any header listed in connection, if present, is also considered hop-by-hop
|
|
||||||
if strings.Trim(extra, " ") != "" {
|
|
||||||
hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
endToEndHeaders := []string{}
|
|
||||||
for respHeader, _ := range respHeaders {
|
|
||||||
if _, ok := hopByHopHeaders[respHeader]; !ok {
|
|
||||||
endToEndHeaders = append(endToEndHeaders, respHeader)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return endToEndHeaders
|
|
||||||
}
|
|
||||||
|
|
||||||
func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
|
|
||||||
if _, ok := respCacheControl["no-store"]; ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if _, ok := reqCacheControl["no-store"]; ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func newGatewayTimeoutResponse(req *http.Request) *http.Response {
|
|
||||||
var braw bytes.Buffer
|
|
||||||
braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
|
|
||||||
resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
// cloneRequest returns a clone of the provided *http.Request.
|
|
||||||
// The clone is a shallow copy of the struct and its Header map.
|
|
||||||
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
|
|
||||||
func cloneRequest(r *http.Request) *http.Request {
|
|
||||||
// shallow copy of the struct
|
|
||||||
r2 := new(http.Request)
|
|
||||||
*r2 = *r
|
|
||||||
// deep copy of the Header
|
|
||||||
r2.Header = make(http.Header)
|
|
||||||
for k, s := range r.Header {
|
|
||||||
r2.Header[k] = s
|
|
||||||
}
|
|
||||||
return r2
|
|
||||||
}
|
|
||||||
|
|
||||||
type cacheControl map[string]string
|
|
||||||
|
|
||||||
func parseCacheControl(headers http.Header) cacheControl {
|
|
||||||
cc := cacheControl{}
|
|
||||||
ccHeader := headers.Get("Cache-Control")
|
|
||||||
for _, part := range strings.Split(ccHeader, ",") {
|
|
||||||
part = strings.Trim(part, " ")
|
|
||||||
if part == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if strings.ContainsRune(part, '=') {
|
|
||||||
keyval := strings.Split(part, "=")
|
|
||||||
cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
|
|
||||||
} else {
|
|
||||||
cc[part] = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return cc
|
|
||||||
}
|
|
||||||
|
|
||||||
// headerAllCommaSepValues returns all comma-separated values (each
|
|
||||||
// with whitespace trimmed) for header name in headers. According to
|
|
||||||
// Section 4.2 of the HTTP/1.1 spec
|
|
||||||
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
|
|
||||||
// values from multiple occurrences of a header should be concatenated, if
|
|
||||||
// the header's value is a comma-separated list.
|
|
||||||
func headerAllCommaSepValues(headers http.Header, name string) []string {
|
|
||||||
var vals []string
|
|
||||||
for _, val := range headers[http.CanonicalHeaderKey(name)] {
|
|
||||||
fields := strings.Split(val, ",")
|
|
||||||
for i, f := range fields {
|
|
||||||
fields[i] = strings.TrimSpace(f)
|
|
||||||
}
|
|
||||||
vals = append(vals, fields...)
|
|
||||||
}
|
|
||||||
return vals
|
|
||||||
}
|
|
||||||
|
|
||||||
// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
// handler with a full copy of the content read from R when EOF is
// reached.
type cachingReadCloser struct {
	// Underlying ReadCloser.
	R io.ReadCloser
	// OnEOF is called with a copy of the content of R when EOF is reached.
	OnEOF func(io.Reader)

	buf bytes.Buffer // buf stores a copy of the content of R.
}

// Read reads the next len(p) bytes from R or until R is drained. The
// return value n is the number of bytes read. If R has no data to
// return, err is io.EOF and OnEOF is called with a full copy of what
// has been read so far.
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
	n, err = r.R.Read(p)
	// Mirror everything the caller read into buf before checking for EOF,
	// so OnEOF always sees the complete content.
	r.buf.Write(p[:n])
	if err == io.EOF {
		r.OnEOF(bytes.NewReader(r.buf.Bytes()))
	}
	return n, err
}

// Close closes the underlying ReadCloser. The buffered copy is not affected.
func (r *cachingReadCloser) Close() error {
	return r.R.Close()
}
|
|
||||||
|
|
||||||
// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
|
|
||||||
func NewMemoryCacheTransport() *Transport {
|
|
||||||
c := NewMemoryCache()
|
|
||||||
t := NewTransport(c)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
476
vendor/github.com/klauspost/cpuid/private-gen.go
generated
vendored
Normal file
476
vendor/github.com/klauspost/cpuid/private-gen.go
generated
vendored
Normal file
@@ -0,0 +1,476 @@
|
|||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/parser"
|
||||||
|
"go/printer"
|
||||||
|
"go/token"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// inFiles are the cpuid sources whose exported identifiers are rewritten to
// unexported form before being emitted into the private package.
var inFiles = []string{"cpuid.go", "cpuid_test.go"}

// copyFiles are copied verbatim into the private package (assembly and
// detection sources that need no identifier rewriting).
var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}

// fileSet is the shared token.FileSet used for all parsing and printing.
var fileSet = token.NewFileSet()

// reWrites are the fixed renaming rules applied to every input file.
var reWrites = []rewrite{
	initRewrite("CPUInfo -> cpuInfo"),
	initRewrite("Vendor -> vendor"),
	initRewrite("Flags -> flags"),
	initRewrite("Detect -> detect"),
	initRewrite("CPU -> cpu"),
}

// excludeNames lists lower-cased identifiers that must never be auto-renamed
// (they would collide with builtins or testing helpers).
var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
	// cpuid_test.go
	"t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
}

// excludePrefixes: exported identifiers whose lower-cased form starts with one
// of these prefixes are left untouched (test and benchmark functions).
var excludePrefixes = []string{"test", "benchmark"}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
Package := "private"
|
||||||
|
parserMode := parser.ParseComments
|
||||||
|
exported := make(map[string]rewrite)
|
||||||
|
for _, file := range inFiles {
|
||||||
|
in, err := os.Open(file)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("opening input", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
src, err := ioutil.ReadAll(in)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("reading input", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("parsing input", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, rw := range reWrites {
|
||||||
|
astfile = rw(astfile)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inspect the AST and print all identifiers and literals.
|
||||||
|
var startDecl token.Pos
|
||||||
|
var endDecl token.Pos
|
||||||
|
ast.Inspect(astfile, func(n ast.Node) bool {
|
||||||
|
var s string
|
||||||
|
switch x := n.(type) {
|
||||||
|
case *ast.Ident:
|
||||||
|
if x.IsExported() {
|
||||||
|
t := strings.ToLower(x.Name)
|
||||||
|
for _, pre := range excludePrefixes {
|
||||||
|
if strings.HasPrefix(t, pre) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if excludeNames[t] != true {
|
||||||
|
//if x.Pos() > startDecl && x.Pos() < endDecl {
|
||||||
|
exported[x.Name] = initRewrite(x.Name + " -> " + t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case *ast.GenDecl:
|
||||||
|
if x.Tok == token.CONST && x.Lparen > 0 {
|
||||||
|
startDecl = x.Lparen
|
||||||
|
endDecl = x.Rparen
|
||||||
|
// fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if s != "" {
|
||||||
|
fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, rw := range exported {
|
||||||
|
astfile = rw(astfile)
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
|
||||||
|
printer.Fprint(&buf, fileSet, astfile)
|
||||||
|
|
||||||
|
// Remove package documentation and insert information
|
||||||
|
s := buf.String()
|
||||||
|
ind := strings.Index(buf.String(), "\npackage cpuid")
|
||||||
|
s = s[ind:]
|
||||||
|
s = "// Generated, DO NOT EDIT,\n" +
|
||||||
|
"// but copy it to your own project and rename the package.\n" +
|
||||||
|
"// See more at http://github.com/klauspost/cpuid\n" +
|
||||||
|
s
|
||||||
|
|
||||||
|
outputName := Package + string(os.PathSeparator) + file
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(outputName, []byte(s), 0644)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("writing output: %s", err)
|
||||||
|
}
|
||||||
|
log.Println("Generated", outputName)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range copyFiles {
|
||||||
|
dst := ""
|
||||||
|
if strings.HasPrefix(file, "cpuid") {
|
||||||
|
dst = Package + string(os.PathSeparator) + file
|
||||||
|
} else {
|
||||||
|
dst = Package + string(os.PathSeparator) + "cpuid_" + file
|
||||||
|
}
|
||||||
|
err := copyFile(file, dst)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("copying file: %s", err)
|
||||||
|
}
|
||||||
|
log.Println("Copied", dst)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyFile copies a file from src to dst. If src and dst files exist, and are
// the same, then return success. Copy the file contents from src to dst.
// Non-regular files (directories, symlinks, devices) on either side are an error.
func copyFile(src, dst string) (err error) {
	sfi, err := os.Stat(src)
	if err != nil {
		return
	}
	if !sfi.Mode().IsRegular() {
		// cannot copy non-regular files (e.g., directories,
		// symlinks, devices, etc.)
		return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
	}
	dfi, err := os.Stat(dst)
	if err != nil {
		// A missing destination is fine (it will be created); any other
		// stat failure is propagated.
		if !os.IsNotExist(err) {
			return
		}
	} else {
		if !(dfi.Mode().IsRegular()) {
			return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
		}
		if os.SameFile(sfi, dfi) {
			// src and dst are the same file: nothing to do.
			return
		}
	}
	err = copyFileContents(src, dst)
	return
}
|
||||||
|
|
||||||
|
// copyFileContents copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all it's contents will be replaced by the contents
// of the source file.
func copyFileContents(src, dst string) (err error) {
	in, err := os.Open(src)
	if err != nil {
		return
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return
	}
	defer func() {
		// Surface a Close error only if the copy itself succeeded.
		cerr := out.Close()
		if err == nil {
			err = cerr
		}
	}()
	if _, err = io.Copy(out, in); err != nil {
		return
	}
	// Flush to stable storage before reporting success.
	err = out.Sync()
	return
}
|
||||||
|
|
||||||
|
type rewrite func(*ast.File) *ast.File
|
||||||
|
|
||||||
|
// Mostly copied from gofmt
|
||||||
|
func initRewrite(rewriteRule string) rewrite {
|
||||||
|
f := strings.Split(rewriteRule, "->")
|
||||||
|
if len(f) != 2 {
|
||||||
|
fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
|
||||||
|
os.Exit(2)
|
||||||
|
}
|
||||||
|
pattern := parseExpr(f[0], "pattern")
|
||||||
|
replace := parseExpr(f[1], "replacement")
|
||||||
|
return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseExpr parses s as an expression.
|
||||||
|
// It might make sense to expand this to allow statement patterns,
|
||||||
|
// but there are problems with preserving formatting and also
|
||||||
|
// with what a wildcard for a statement looks like.
|
||||||
|
func parseExpr(s, what string) ast.Expr {
|
||||||
|
x, err := parser.ParseExpr(s)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
|
||||||
|
os.Exit(2)
|
||||||
|
}
|
||||||
|
return x
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keep this function for debugging.
|
||||||
|
/*
|
||||||
|
func dump(msg string, val reflect.Value) {
|
||||||
|
fmt.Printf("%s:\n", msg)
|
||||||
|
ast.Print(fileSet, val.Interface())
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
|
// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
	// Preserve the comment map so comments can be re-attached after rewriting.
	cmap := ast.NewCommentMap(fileSet, p, p.Comments)
	// m collects wildcard bindings for the current match attempt.
	m := make(map[string]reflect.Value)
	pat := reflect.ValueOf(pattern)
	repl := reflect.ValueOf(replace)

	var rewriteVal func(val reflect.Value) reflect.Value
	rewriteVal = func(val reflect.Value) reflect.Value {
		// don't bother if val is invalid to start with
		if !val.IsValid() {
			return reflect.Value{}
		}
		// Reset wildcard bindings before each match attempt.
		for k := range m {
			delete(m, k)
		}
		// Rewrite children first (bottom-up), then try to match this node.
		val = apply(rewriteVal, val)
		if match(m, pat, val) {
			val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
		}
		return val
	}

	r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
	r.Comments = cmap.Filter(r).Comments() // recreate comments list
	return r
}
|
||||||
|
|
||||||
|
// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
func set(x, y reflect.Value) {
	// don't bother if x cannot be set or y is invalid
	if !x.CanSet() || !y.IsValid() {
		return
	}
	defer func() {
		if x := recover(); x != nil {
			// reflect panics with plain strings for assignment type errors;
			// swallow only those and re-panic on anything else.
			if s, ok := x.(string); ok &&
				(strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
				// x cannot be set to y - ignore this rewrite
				return
			}
			panic(x)
		}
	}()
	x.Set(y)
}
|
||||||
|
|
||||||
|
// Values/types for special cases.
var (
	// Typed nils used to sever *ast.Object / *ast.Scope links after a rewrite.
	objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
	scopePtrNil  = reflect.ValueOf((*ast.Scope)(nil))

	// Reflected types consulted by apply/match for special handling.
	identType     = reflect.TypeOf((*ast.Ident)(nil))
	objectPtrType = reflect.TypeOf((*ast.Object)(nil))
	positionType  = reflect.TypeOf(token.NoPos)
	callExprType  = reflect.TypeOf((*ast.CallExpr)(nil))
	scopePtrType  = reflect.TypeOf((*ast.Scope)(nil))
)
|
||||||
|
|
||||||
|
// apply replaces each AST field x in val with f(x), returning val.
// To avoid extra conversions, f operates on the reflect.Value form.
func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
	if !val.IsValid() {
		return reflect.Value{}
	}

	// *ast.Objects introduce cycles and are likely incorrect after
	// rewrite; don't follow them but replace with nil instead
	if val.Type() == objectPtrType {
		return objectPtrNil
	}

	// similarly for scopes: they are likely incorrect after a rewrite;
	// replace them with nil
	if val.Type() == scopePtrType {
		return scopePtrNil
	}

	// Recurse into the node's children: slice elements, struct fields,
	// or the dynamic value behind an interface.
	switch v := reflect.Indirect(val); v.Kind() {
	case reflect.Slice:
		for i := 0; i < v.Len(); i++ {
			e := v.Index(i)
			set(e, f(e))
		}
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			e := v.Field(i)
			set(e, f(e))
		}
	case reflect.Interface:
		e := v.Elem()
		set(v, f(e))
	}
	return val
}
|
||||||
|
|
||||||
|
// isWildcard reports whether s is exactly one lower-case rune, which the
// rewrite engine treats as a pattern wildcard.
func isWildcard(s string) bool {
	r, size := utf8.DecodeRuneInString(s)
	if size != len(s) {
		return false
	}
	return unicode.IsLower(r)
}
|
||||||
|
|
||||||
|
// match returns true if pattern matches val,
// recording wildcard submatches in m.
// If m == nil, match checks whether pattern == val.
func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
	// Wildcard matches any expression. If it appears multiple
	// times in the pattern, it must match the same expression
	// each time.
	if m != nil && pattern.IsValid() && pattern.Type() == identType {
		name := pattern.Interface().(*ast.Ident).Name
		if isWildcard(name) && val.IsValid() {
			// wildcards only match valid (non-nil) expressions.
			if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
				if old, ok := m[name]; ok {
					// Repeated wildcard: must equal its first binding.
					return match(nil, old, val)
				}
				m[name] = val
				return true
			}
		}
	}

	// Otherwise, pattern and val must match recursively.
	if !pattern.IsValid() || !val.IsValid() {
		return !pattern.IsValid() && !val.IsValid()
	}
	if pattern.Type() != val.Type() {
		return false
	}

	// Special cases.
	switch pattern.Type() {
	case identType:
		// For identifiers, only the names need to match
		// (and none of the other *ast.Object information).
		// This is a common case, handle it all here instead
		// of recursing down any further via reflection.
		p := pattern.Interface().(*ast.Ident)
		v := val.Interface().(*ast.Ident)
		return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
	case objectPtrType, positionType:
		// object pointers and token positions always match
		return true
	case callExprType:
		// For calls, the Ellipsis fields (token.Position) must
		// match since that is how f(x) and f(x...) are different.
		// Check them here but fall through for the remaining fields.
		p := pattern.Interface().(*ast.CallExpr)
		v := val.Interface().(*ast.CallExpr)
		if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
			return false
		}
	}

	p := reflect.Indirect(pattern)
	v := reflect.Indirect(val)
	if !p.IsValid() || !v.IsValid() {
		return !p.IsValid() && !v.IsValid()
	}

	// Structural comparison: element-wise for slices, field-wise for
	// structs, and on the dynamic values for interfaces.
	switch p.Kind() {
	case reflect.Slice:
		if p.Len() != v.Len() {
			return false
		}
		for i := 0; i < p.Len(); i++ {
			if !match(m, p.Index(i), v.Index(i)) {
				return false
			}
		}
		return true

	case reflect.Struct:
		for i := 0; i < p.NumField(); i++ {
			if !match(m, p.Field(i), v.Field(i)) {
				return false
			}
		}
		return true

	case reflect.Interface:
		return match(m, p.Elem(), v.Elem())
	}

	// Handle token integers, etc.
	return p.Interface() == v.Interface()
}
|
||||||
|
|
||||||
|
// subst returns a copy of pattern with values from m substituted in place
|
||||||
|
// of wildcards and pos used as the position of tokens from the pattern.
|
||||||
|
// if m == nil, subst returns a copy of pattern and doesn't change the line
|
||||||
|
// number information.
|
||||||
|
func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
|
||||||
|
if !pattern.IsValid() {
|
||||||
|
return reflect.Value{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wildcard gets replaced with map value.
|
||||||
|
if m != nil && pattern.Type() == identType {
|
||||||
|
name := pattern.Interface().(*ast.Ident).Name
|
||||||
|
if isWildcard(name) {
|
||||||
|
if old, ok := m[name]; ok {
|
||||||
|
return subst(nil, old, reflect.Value{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if pos.IsValid() && pattern.Type() == positionType {
|
||||||
|
// use new position only if old position was valid in the first place
|
||||||
|
if old := pattern.Interface().(token.Pos); !old.IsValid() {
|
||||||
|
return pattern
|
||||||
|
}
|
||||||
|
return pos
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise copy.
|
||||||
|
switch p := pattern; p.Kind() {
|
||||||
|
case reflect.Slice:
|
||||||
|
v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
|
||||||
|
for i := 0; i < p.Len(); i++ {
|
||||||
|
v.Index(i).Set(subst(m, p.Index(i), pos))
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
v := reflect.New(p.Type()).Elem()
|
||||||
|
for i := 0; i < p.NumField(); i++ {
|
||||||
|
v.Field(i).Set(subst(m, p.Field(i), pos))
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
|
||||||
|
case reflect.Ptr:
|
||||||
|
v := reflect.New(p.Type()).Elem()
|
||||||
|
if elem := p.Elem(); elem.IsValid() {
|
||||||
|
v.Set(subst(m, elem, pos).Addr())
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
|
||||||
|
case reflect.Interface:
|
||||||
|
v := reflect.New(p.Type()).Elem()
|
||||||
|
if elem := p.Elem(); elem.IsValid() {
|
||||||
|
v.Set(subst(m, elem, pos))
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
return pattern
|
||||||
|
}
|
||||||
11
vendor/github.com/liggitt/tabwriter/.travis.yml
generated
vendored
11
vendor/github.com/liggitt/tabwriter/.travis.yml
generated
vendored
@@ -1,11 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- "1.8"
|
|
||||||
- "1.9"
|
|
||||||
- "1.10"
|
|
||||||
- "1.11"
|
|
||||||
- "1.12"
|
|
||||||
- master
|
|
||||||
|
|
||||||
script: go test -v ./...
|
|
||||||
27
vendor/github.com/liggitt/tabwriter/LICENSE
generated
vendored
27
vendor/github.com/liggitt/tabwriter/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
|||||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
7
vendor/github.com/liggitt/tabwriter/README.md
generated
vendored
7
vendor/github.com/liggitt/tabwriter/README.md
generated
vendored
@@ -1,7 +0,0 @@
|
|||||||
This repo is a drop-in replacement for the golang [text/tabwriter](https://golang.org/pkg/text/tabwriter/) package.
|
|
||||||
|
|
||||||
It is based on that package at [cf2c2ea8](https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a/src/text/tabwriter) and inherits its license.
|
|
||||||
|
|
||||||
The following additional features are supported:
|
|
||||||
* `RememberWidths` flag allows remembering maximum widths seen per column even after Flush() is called.
|
|
||||||
* `RememberedWidths() []int` and `SetRememberedWidths([]int) *Writer` allows obtaining and transferring remembered column width between writers.
|
|
||||||
637
vendor/github.com/liggitt/tabwriter/tabwriter.go
generated
vendored
637
vendor/github.com/liggitt/tabwriter/tabwriter.go
generated
vendored
@@ -1,637 +0,0 @@
|
|||||||
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package tabwriter implements a write filter (tabwriter.Writer) that
|
|
||||||
// translates tabbed columns in input into properly aligned text.
|
|
||||||
//
|
|
||||||
// It is a drop-in replacement for the golang text/tabwriter package (https://golang.org/pkg/text/tabwriter),
|
|
||||||
// based on that package at https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a
|
|
||||||
// with support for additional features.
|
|
||||||
//
|
|
||||||
// The package is using the Elastic Tabstops algorithm described at
|
|
||||||
// http://nickgravgaard.com/elastictabstops/index.html.
|
|
||||||
package tabwriter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// Filter implementation
|
|
||||||
|
|
||||||
// A cell represents a segment of text terminated by tabs or line breaks.
|
|
||||||
// The text itself is stored in a separate buffer; cell only describes the
|
|
||||||
// segment's size in bytes, its width in runes, and whether it's an htab
|
|
||||||
// ('\t') terminated cell.
|
|
||||||
//
|
|
||||||
type cell struct {
|
|
||||||
size int // cell size in bytes
|
|
||||||
width int // cell width in runes
|
|
||||||
htab bool // true if the cell is terminated by an htab ('\t')
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Writer is a filter that inserts padding around tab-delimited
|
|
||||||
// columns in its input to align them in the output.
|
|
||||||
//
|
|
||||||
// The Writer treats incoming bytes as UTF-8-encoded text consisting
|
|
||||||
// of cells terminated by horizontal ('\t') or vertical ('\v') tabs,
|
|
||||||
// and newline ('\n') or formfeed ('\f') characters; both newline and
|
|
||||||
// formfeed act as line breaks.
|
|
||||||
//
|
|
||||||
// Tab-terminated cells in contiguous lines constitute a column. The
|
|
||||||
// Writer inserts padding as needed to make all cells in a column have
|
|
||||||
// the same width, effectively aligning the columns. It assumes that
|
|
||||||
// all characters have the same width, except for tabs for which a
|
|
||||||
// tabwidth must be specified. Column cells must be tab-terminated, not
|
|
||||||
// tab-separated: non-tab terminated trailing text at the end of a line
|
|
||||||
// forms a cell but that cell is not part of an aligned column.
|
|
||||||
// For instance, in this example (where | stands for a horizontal tab):
|
|
||||||
//
|
|
||||||
// aaaa|bbb|d
|
|
||||||
// aa |b |dd
|
|
||||||
// a |
|
|
||||||
// aa |cccc|eee
|
|
||||||
//
|
|
||||||
// the b and c are in distinct columns (the b column is not contiguous
|
|
||||||
// all the way). The d and e are not in a column at all (there's no
|
|
||||||
// terminating tab, nor would the column be contiguous).
|
|
||||||
//
|
|
||||||
// The Writer assumes that all Unicode code points have the same width;
|
|
||||||
// this may not be true in some fonts or if the string contains combining
|
|
||||||
// characters.
|
|
||||||
//
|
|
||||||
// If DiscardEmptyColumns is set, empty columns that are terminated
|
|
||||||
// entirely by vertical (or "soft") tabs are discarded. Columns
|
|
||||||
// terminated by horizontal (or "hard") tabs are not affected by
|
|
||||||
// this flag.
|
|
||||||
//
|
|
||||||
// If a Writer is configured to filter HTML, HTML tags and entities
|
|
||||||
// are passed through. The widths of tags and entities are
|
|
||||||
// assumed to be zero (tags) and one (entities) for formatting purposes.
|
|
||||||
//
|
|
||||||
// A segment of text may be escaped by bracketing it with Escape
|
|
||||||
// characters. The tabwriter passes escaped text segments through
|
|
||||||
// unchanged. In particular, it does not interpret any tabs or line
|
|
||||||
// breaks within the segment. If the StripEscape flag is set, the
|
|
||||||
// Escape characters are stripped from the output; otherwise they
|
|
||||||
// are passed through as well. For the purpose of formatting, the
|
|
||||||
// width of the escaped text is always computed excluding the Escape
|
|
||||||
// characters.
|
|
||||||
//
|
|
||||||
// The formfeed character acts like a newline but it also terminates
|
|
||||||
// all columns in the current line (effectively calling Flush). Tab-
|
|
||||||
// terminated cells in the next line start new columns. Unless found
|
|
||||||
// inside an HTML tag or inside an escaped text segment, formfeed
|
|
||||||
// characters appear as newlines in the output.
|
|
||||||
//
|
|
||||||
// The Writer must buffer input internally, because proper spacing
|
|
||||||
// of one line may depend on the cells in future lines. Clients must
|
|
||||||
// call Flush when done calling Write.
|
|
||||||
//
|
|
||||||
type Writer struct {
|
|
||||||
// configuration
|
|
||||||
output io.Writer
|
|
||||||
minwidth int
|
|
||||||
tabwidth int
|
|
||||||
padding int
|
|
||||||
padbytes [8]byte
|
|
||||||
flags uint
|
|
||||||
|
|
||||||
// current state
|
|
||||||
buf []byte // collected text excluding tabs or line breaks
|
|
||||||
pos int // buffer position up to which cell.width of incomplete cell has been computed
|
|
||||||
cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
|
|
||||||
endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
|
|
||||||
lines [][]cell // list of lines; each line is a list of cells
|
|
||||||
widths []int // list of column widths in runes - re-used during formatting
|
|
||||||
|
|
||||||
maxwidths []int // list of max column widths in runes
|
|
||||||
}
|
|
||||||
|
|
||||||
// addLine adds a new line.
|
|
||||||
// flushed is a hint indicating whether the underlying writer was just flushed.
|
|
||||||
// If so, the previous line is not likely to be a good indicator of the new line's cells.
|
|
||||||
func (b *Writer) addLine(flushed bool) {
|
|
||||||
// Grow slice instead of appending,
|
|
||||||
// as that gives us an opportunity
|
|
||||||
// to re-use an existing []cell.
|
|
||||||
if n := len(b.lines) + 1; n <= cap(b.lines) {
|
|
||||||
b.lines = b.lines[:n]
|
|
||||||
b.lines[n-1] = b.lines[n-1][:0]
|
|
||||||
} else {
|
|
||||||
b.lines = append(b.lines, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !flushed {
|
|
||||||
// The previous line is probably a good indicator
|
|
||||||
// of how many cells the current line will have.
|
|
||||||
// If the current line's capacity is smaller than that,
|
|
||||||
// abandon it and make a new one.
|
|
||||||
if n := len(b.lines); n >= 2 {
|
|
||||||
if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) {
|
|
||||||
b.lines[n-1] = make([]cell, 0, prev)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset the current state.
|
|
||||||
func (b *Writer) reset() {
|
|
||||||
b.buf = b.buf[:0]
|
|
||||||
b.pos = 0
|
|
||||||
b.cell = cell{}
|
|
||||||
b.endChar = 0
|
|
||||||
b.lines = b.lines[0:0]
|
|
||||||
b.widths = b.widths[0:0]
|
|
||||||
b.addLine(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Internal representation (current state):
|
|
||||||
//
|
|
||||||
// - all text written is appended to buf; tabs and line breaks are stripped away
|
|
||||||
// - at any given time there is a (possibly empty) incomplete cell at the end
|
|
||||||
// (the cell starts after a tab or line break)
|
|
||||||
// - cell.size is the number of bytes belonging to the cell so far
|
|
||||||
// - cell.width is text width in runes of that cell from the start of the cell to
|
|
||||||
// position pos; html tags and entities are excluded from this width if html
|
|
||||||
// filtering is enabled
|
|
||||||
// - the sizes and widths of processed text are kept in the lines list
|
|
||||||
// which contains a list of cells for each line
|
|
||||||
// - the widths list is a temporary list with current widths used during
|
|
||||||
// formatting; it is kept in Writer because it's re-used
|
|
||||||
//
|
|
||||||
// |<---------- size ---------->|
|
|
||||||
// | |
|
|
||||||
// |<- width ->|<- ignored ->| |
|
|
||||||
// | | | |
|
|
||||||
// [---processed---tab------------<tag>...</tag>...]
|
|
||||||
// ^ ^ ^
|
|
||||||
// | | |
|
|
||||||
// buf start of incomplete cell pos
|
|
||||||
|
|
||||||
// Formatting can be controlled with these flags.
|
|
||||||
const (
|
|
||||||
// Ignore html tags and treat entities (starting with '&'
|
|
||||||
// and ending in ';') as single characters (width = 1).
|
|
||||||
FilterHTML uint = 1 << iota
|
|
||||||
|
|
||||||
// Strip Escape characters bracketing escaped text segments
|
|
||||||
// instead of passing them through unchanged with the text.
|
|
||||||
StripEscape
|
|
||||||
|
|
||||||
// Force right-alignment of cell content.
|
|
||||||
// Default is left-alignment.
|
|
||||||
AlignRight
|
|
||||||
|
|
||||||
// Handle empty columns as if they were not present in
|
|
||||||
// the input in the first place.
|
|
||||||
DiscardEmptyColumns
|
|
||||||
|
|
||||||
// Always use tabs for indentation columns (i.e., padding of
|
|
||||||
// leading empty cells on the left) independent of padchar.
|
|
||||||
TabIndent
|
|
||||||
|
|
||||||
// Print a vertical bar ('|') between columns (after formatting).
|
|
||||||
// Discarded columns appear as zero-width columns ("||").
|
|
||||||
Debug
|
|
||||||
|
|
||||||
// Remember maximum widths seen per column even after Flush() is called.
|
|
||||||
RememberWidths
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Writer must be initialized with a call to Init. The first parameter (output)
|
|
||||||
// specifies the filter output. The remaining parameters control the formatting:
|
|
||||||
//
|
|
||||||
// minwidth minimal cell width including any padding
|
|
||||||
// tabwidth width of tab characters (equivalent number of spaces)
|
|
||||||
// padding padding added to a cell before computing its width
|
|
||||||
// padchar ASCII char used for padding
|
|
||||||
// if padchar == '\t', the Writer will assume that the
|
|
||||||
// width of a '\t' in the formatted output is tabwidth,
|
|
||||||
// and cells are left-aligned independent of align_left
|
|
||||||
// (for correct-looking results, tabwidth must correspond
|
|
||||||
// to the tab width in the viewer displaying the result)
|
|
||||||
// flags formatting control
|
|
||||||
//
|
|
||||||
func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
|
|
||||||
if minwidth < 0 || tabwidth < 0 || padding < 0 {
|
|
||||||
panic("negative minwidth, tabwidth, or padding")
|
|
||||||
}
|
|
||||||
b.output = output
|
|
||||||
b.minwidth = minwidth
|
|
||||||
b.tabwidth = tabwidth
|
|
||||||
b.padding = padding
|
|
||||||
for i := range b.padbytes {
|
|
||||||
b.padbytes[i] = padchar
|
|
||||||
}
|
|
||||||
if padchar == '\t' {
|
|
||||||
// tab padding enforces left-alignment
|
|
||||||
flags &^= AlignRight
|
|
||||||
}
|
|
||||||
b.flags = flags
|
|
||||||
|
|
||||||
b.reset()
|
|
||||||
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// debugging support (keep code around)
|
|
||||||
func (b *Writer) dump() {
|
|
||||||
pos := 0
|
|
||||||
for i, line := range b.lines {
|
|
||||||
print("(", i, ") ")
|
|
||||||
for _, c := range line {
|
|
||||||
print("[", string(b.buf[pos:pos+c.size]), "]")
|
|
||||||
pos += c.size
|
|
||||||
}
|
|
||||||
print("\n")
|
|
||||||
}
|
|
||||||
print("\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// local error wrapper so we can distinguish errors we want to return
|
|
||||||
// as errors from genuine panics (which we don't want to return as errors)
|
|
||||||
type osError struct {
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Writer) write0(buf []byte) {
|
|
||||||
n, err := b.output.Write(buf)
|
|
||||||
if n != len(buf) && err == nil {
|
|
||||||
err = io.ErrShortWrite
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
panic(osError{err})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Writer) writeN(src []byte, n int) {
|
|
||||||
for n > len(src) {
|
|
||||||
b.write0(src)
|
|
||||||
n -= len(src)
|
|
||||||
}
|
|
||||||
b.write0(src[0:n])
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
newline = []byte{'\n'}
|
|
||||||
tabs = []byte("\t\t\t\t\t\t\t\t")
|
|
||||||
)
|
|
||||||
|
|
||||||
func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
|
|
||||||
if b.padbytes[0] == '\t' || useTabs {
|
|
||||||
// padding is done with tabs
|
|
||||||
if b.tabwidth == 0 {
|
|
||||||
return // tabs have no width - can't do any padding
|
|
||||||
}
|
|
||||||
// make cellw the smallest multiple of b.tabwidth
|
|
||||||
cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
|
|
||||||
n := cellw - textw // amount of padding
|
|
||||||
if n < 0 {
|
|
||||||
panic("internal error")
|
|
||||||
}
|
|
||||||
b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// padding is done with non-tab characters
|
|
||||||
b.writeN(b.padbytes[0:], cellw-textw)
|
|
||||||
}
|
|
||||||
|
|
||||||
var vbar = []byte{'|'}
|
|
||||||
|
|
||||||
func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
|
|
||||||
pos = pos0
|
|
||||||
for i := line0; i < line1; i++ {
|
|
||||||
line := b.lines[i]
|
|
||||||
|
|
||||||
// if TabIndent is set, use tabs to pad leading empty cells
|
|
||||||
useTabs := b.flags&TabIndent != 0
|
|
||||||
|
|
||||||
for j, c := range line {
|
|
||||||
if j > 0 && b.flags&Debug != 0 {
|
|
||||||
// indicate column break
|
|
||||||
b.write0(vbar)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.size == 0 {
|
|
||||||
// empty cell
|
|
||||||
if j < len(b.widths) {
|
|
||||||
b.writePadding(c.width, b.widths[j], useTabs)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// non-empty cell
|
|
||||||
useTabs = false
|
|
||||||
if b.flags&AlignRight == 0 { // align left
|
|
||||||
b.write0(b.buf[pos : pos+c.size])
|
|
||||||
pos += c.size
|
|
||||||
if j < len(b.widths) {
|
|
||||||
b.writePadding(c.width, b.widths[j], false)
|
|
||||||
}
|
|
||||||
} else { // align right
|
|
||||||
if j < len(b.widths) {
|
|
||||||
b.writePadding(c.width, b.widths[j], false)
|
|
||||||
}
|
|
||||||
b.write0(b.buf[pos : pos+c.size])
|
|
||||||
pos += c.size
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if i+1 == len(b.lines) {
|
|
||||||
// last buffered line - we don't have a newline, so just write
|
|
||||||
// any outstanding buffered data
|
|
||||||
b.write0(b.buf[pos : pos+b.cell.size])
|
|
||||||
pos += b.cell.size
|
|
||||||
} else {
|
|
||||||
// not the last line - write newline
|
|
||||||
b.write0(newline)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Format the text between line0 and line1 (excluding line1); pos
|
|
||||||
// is the buffer position corresponding to the beginning of line0.
|
|
||||||
// Returns the buffer position corresponding to the beginning of
|
|
||||||
// line1 and an error, if any.
|
|
||||||
//
|
|
||||||
func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
|
|
||||||
pos = pos0
|
|
||||||
column := len(b.widths)
|
|
||||||
for this := line0; this < line1; this++ {
|
|
||||||
line := b.lines[this]
|
|
||||||
|
|
||||||
if column >= len(line)-1 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// cell exists in this column => this line
|
|
||||||
// has more cells than the previous line
|
|
||||||
// (the last cell per line is ignored because cells are
|
|
||||||
// tab-terminated; the last cell per line describes the
|
|
||||||
// text before the newline/formfeed and does not belong
|
|
||||||
// to a column)
|
|
||||||
|
|
||||||
// print unprinted lines until beginning of block
|
|
||||||
pos = b.writeLines(pos, line0, this)
|
|
||||||
line0 = this
|
|
||||||
|
|
||||||
// column block begin
|
|
||||||
width := b.minwidth // minimal column width
|
|
||||||
discardable := true // true if all cells in this column are empty and "soft"
|
|
||||||
for ; this < line1; this++ {
|
|
||||||
line = b.lines[this]
|
|
||||||
if column >= len(line)-1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
// cell exists in this column
|
|
||||||
c := line[column]
|
|
||||||
// update width
|
|
||||||
if w := c.width + b.padding; w > width {
|
|
||||||
width = w
|
|
||||||
}
|
|
||||||
// update discardable
|
|
||||||
if c.width > 0 || c.htab {
|
|
||||||
discardable = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// column block end
|
|
||||||
|
|
||||||
// discard empty columns if necessary
|
|
||||||
if discardable && b.flags&DiscardEmptyColumns != 0 {
|
|
||||||
width = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if b.flags&RememberWidths != 0 {
|
|
||||||
if len(b.maxwidths) < len(b.widths) {
|
|
||||||
b.maxwidths = append(b.maxwidths, b.widths[len(b.maxwidths):]...)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case len(b.maxwidths) == len(b.widths):
|
|
||||||
b.maxwidths = append(b.maxwidths, width)
|
|
||||||
case b.maxwidths[len(b.widths)] > width:
|
|
||||||
width = b.maxwidths[len(b.widths)]
|
|
||||||
case b.maxwidths[len(b.widths)] < width:
|
|
||||||
b.maxwidths[len(b.widths)] = width
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// format and print all columns to the right of this column
|
|
||||||
// (we know the widths of this column and all columns to the left)
|
|
||||||
b.widths = append(b.widths, width) // push width
|
|
||||||
pos = b.format(pos, line0, this)
|
|
||||||
b.widths = b.widths[0 : len(b.widths)-1] // pop width
|
|
||||||
line0 = this
|
|
||||||
}
|
|
||||||
|
|
||||||
// print unprinted lines until end
|
|
||||||
return b.writeLines(pos, line0, line1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Append text to current cell.
|
|
||||||
func (b *Writer) append(text []byte) {
|
|
||||||
b.buf = append(b.buf, text...)
|
|
||||||
b.cell.size += len(text)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the cell width.
|
|
||||||
func (b *Writer) updateWidth() {
|
|
||||||
b.cell.width += utf8.RuneCount(b.buf[b.pos:])
|
|
||||||
b.pos = len(b.buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// To escape a text segment, bracket it with Escape characters.
|
|
||||||
// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
|
|
||||||
// does not terminate a cell and constitutes a single character of
|
|
||||||
// width one for formatting purposes.
|
|
||||||
//
|
|
||||||
// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
|
|
||||||
//
|
|
||||||
const Escape = '\xff'
|
|
||||||
|
|
||||||
// Start escaped mode.
|
|
||||||
func (b *Writer) startEscape(ch byte) {
|
|
||||||
switch ch {
|
|
||||||
case Escape:
|
|
||||||
b.endChar = Escape
|
|
||||||
case '<':
|
|
||||||
b.endChar = '>'
|
|
||||||
case '&':
|
|
||||||
b.endChar = ';'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Terminate escaped mode. If the escaped text was an HTML tag, its width
|
|
||||||
// is assumed to be zero for formatting purposes; if it was an HTML entity,
|
|
||||||
// its width is assumed to be one. In all other cases, the width is the
|
|
||||||
// unicode width of the text.
|
|
||||||
//
|
|
||||||
func (b *Writer) endEscape() {
|
|
||||||
switch b.endChar {
|
|
||||||
case Escape:
|
|
||||||
b.updateWidth()
|
|
||||||
if b.flags&StripEscape == 0 {
|
|
||||||
b.cell.width -= 2 // don't count the Escape chars
|
|
||||||
}
|
|
||||||
case '>': // tag of zero width
|
|
||||||
case ';':
|
|
||||||
b.cell.width++ // entity, count as one rune
|
|
||||||
}
|
|
||||||
b.pos = len(b.buf)
|
|
||||||
b.endChar = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Terminate the current cell by adding it to the list of cells of the
|
|
||||||
// current line. Returns the number of cells in that line.
|
|
||||||
//
|
|
||||||
func (b *Writer) terminateCell(htab bool) int {
|
|
||||||
b.cell.htab = htab
|
|
||||||
line := &b.lines[len(b.lines)-1]
|
|
||||||
*line = append(*line, b.cell)
|
|
||||||
b.cell = cell{}
|
|
||||||
return len(*line)
|
|
||||||
}
|
|
||||||
|
|
||||||
func handlePanic(err *error, op string) {
|
|
||||||
if e := recover(); e != nil {
|
|
||||||
if nerr, ok := e.(osError); ok {
|
|
||||||
*err = nerr.err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
panic("tabwriter: panic during " + op)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RememberedWidths returns a copy of the remembered per-column maximum widths.
|
|
||||||
// Requires use of the RememberWidths flag, and is not threadsafe.
|
|
||||||
func (b *Writer) RememberedWidths() []int {
|
|
||||||
retval := make([]int, len(b.maxwidths))
|
|
||||||
copy(retval, b.maxwidths)
|
|
||||||
return retval
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetRememberedWidths sets the remembered per-column maximum widths.
|
|
||||||
// Requires use of the RememberWidths flag, and is not threadsafe.
|
|
||||||
func (b *Writer) SetRememberedWidths(widths []int) *Writer {
|
|
||||||
b.maxwidths = make([]int, len(widths))
|
|
||||||
copy(b.maxwidths, widths)
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush should be called after the last call to Write to ensure
|
|
||||||
// that any data buffered in the Writer is written to output. Any
|
|
||||||
// incomplete escape sequence at the end is considered
|
|
||||||
// complete for formatting purposes.
|
|
||||||
func (b *Writer) Flush() error {
|
|
||||||
return b.flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Writer) flush() (err error) {
|
|
||||||
defer b.reset() // even in the presence of errors
|
|
||||||
defer handlePanic(&err, "Flush")
|
|
||||||
|
|
||||||
// add current cell if not empty
|
|
||||||
if b.cell.size > 0 {
|
|
||||||
if b.endChar != 0 {
|
|
||||||
// inside escape - terminate it even if incomplete
|
|
||||||
b.endEscape()
|
|
||||||
}
|
|
||||||
b.terminateCell(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// format contents of buffer
|
|
||||||
b.format(0, 0, len(b.lines))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var hbar = []byte("---\n")
|
|
||||||
|
|
||||||
// Write writes buf to the writer b.
|
|
||||||
// The only errors returned are ones encountered
|
|
||||||
// while writing to the underlying output stream.
|
|
||||||
//
|
|
||||||
func (b *Writer) Write(buf []byte) (n int, err error) {
|
|
||||||
defer handlePanic(&err, "Write")
|
|
||||||
|
|
||||||
// split text into cells
|
|
||||||
n = 0
|
|
||||||
for i, ch := range buf {
|
|
||||||
if b.endChar == 0 {
|
|
||||||
// outside escape
|
|
||||||
switch ch {
|
|
||||||
case '\t', '\v', '\n', '\f':
|
|
||||||
// end of cell
|
|
||||||
b.append(buf[n:i])
|
|
||||||
b.updateWidth()
|
|
||||||
n = i + 1 // ch consumed
|
|
||||||
ncells := b.terminateCell(ch == '\t')
|
|
||||||
if ch == '\n' || ch == '\f' {
|
|
||||||
// terminate line
|
|
||||||
b.addLine(ch == '\f')
|
|
||||||
if ch == '\f' || ncells == 1 {
|
|
||||||
// A '\f' always forces a flush. Otherwise, if the previous
|
|
||||||
// line has only one cell which does not have an impact on
|
|
||||||
// the formatting of the following lines (the last cell per
|
|
||||||
// line is ignored by format()), thus we can flush the
|
|
||||||
// Writer contents.
|
|
||||||
if err = b.Flush(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if ch == '\f' && b.flags&Debug != 0 {
|
|
||||||
// indicate section break
|
|
||||||
b.write0(hbar)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case Escape:
|
|
||||||
// start of escaped sequence
|
|
||||||
b.append(buf[n:i])
|
|
||||||
b.updateWidth()
|
|
||||||
n = i
|
|
||||||
if b.flags&StripEscape != 0 {
|
|
||||||
n++ // strip Escape
|
|
||||||
}
|
|
||||||
b.startEscape(Escape)
|
|
||||||
|
|
||||||
case '<', '&':
|
|
||||||
// possibly an html tag/entity
|
|
||||||
if b.flags&FilterHTML != 0 {
|
|
||||||
// begin of tag/entity
|
|
||||||
b.append(buf[n:i])
|
|
||||||
b.updateWidth()
|
|
||||||
n = i
|
|
||||||
b.startEscape(ch)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
// inside escape
|
|
||||||
if ch == b.endChar {
|
|
||||||
// end of tag/entity
|
|
||||||
j := i + 1
|
|
||||||
if ch == Escape && b.flags&StripEscape != 0 {
|
|
||||||
j = i // strip Escape
|
|
||||||
}
|
|
||||||
b.append(buf[n:j])
|
|
||||||
n = i + 1 // ch consumed
|
|
||||||
b.endEscape()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// append leftover text
|
|
||||||
b.append(buf[n:])
|
|
||||||
n = len(buf)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriter allocates and initializes a new tabwriter.Writer.
|
|
||||||
// The parameters are the same as for the Init function.
|
|
||||||
//
|
|
||||||
func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
|
|
||||||
return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
|
|
||||||
}
|
|
||||||
169
vendor/github.com/marten-seemann/qtls/generate_cert.go
generated
vendored
Normal file
169
vendor/github.com/marten-seemann/qtls/generate_cert.go
generated
vendored
Normal file
@@ -0,0 +1,169 @@
|
|||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// Generate a self-signed X.509 certificate for a TLS server. Outputs to
|
||||||
|
// 'cert.pem' and 'key.pem' and will overwrite existing files.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/elliptic"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/x509"
|
||||||
|
"crypto/x509/pkix"
|
||||||
|
"encoding/pem"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"math/big"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
host = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for")
|
||||||
|
validFrom = flag.String("start-date", "", "Creation date formatted as Jan 1 15:04:05 2011")
|
||||||
|
validFor = flag.Duration("duration", 365*24*time.Hour, "Duration that certificate is valid for")
|
||||||
|
isCA = flag.Bool("ca", false, "whether this cert should be its own Certificate Authority")
|
||||||
|
rsaBits = flag.Int("rsa-bits", 2048, "Size of RSA key to generate. Ignored if --ecdsa-curve is set")
|
||||||
|
ecdsaCurve = flag.String("ecdsa-curve", "", "ECDSA curve to use to generate a key. Valid values are P224, P256 (recommended), P384, P521")
|
||||||
|
)
|
||||||
|
|
||||||
|
func publicKey(priv interface{}) interface{} {
|
||||||
|
switch k := priv.(type) {
|
||||||
|
case *rsa.PrivateKey:
|
||||||
|
return &k.PublicKey
|
||||||
|
case *ecdsa.PrivateKey:
|
||||||
|
return &k.PublicKey
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func pemBlockForKey(priv interface{}) *pem.Block {
|
||||||
|
switch k := priv.(type) {
|
||||||
|
case *rsa.PrivateKey:
|
||||||
|
return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}
|
||||||
|
case *ecdsa.PrivateKey:
|
||||||
|
b, err := x509.MarshalECPrivateKey(k)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Unable to marshal ECDSA private key: %v", err)
|
||||||
|
os.Exit(2)
|
||||||
|
}
|
||||||
|
return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if len(*host) == 0 {
|
||||||
|
log.Fatalf("Missing required --host parameter")
|
||||||
|
}
|
||||||
|
|
||||||
|
var priv interface{}
|
||||||
|
var err error
|
||||||
|
switch *ecdsaCurve {
|
||||||
|
case "":
|
||||||
|
priv, err = rsa.GenerateKey(rand.Reader, *rsaBits)
|
||||||
|
case "P224":
|
||||||
|
priv, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
|
||||||
|
case "P256":
|
||||||
|
priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||||
|
case "P384":
|
||||||
|
priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
|
||||||
|
case "P521":
|
||||||
|
priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
|
||||||
|
default:
|
||||||
|
fmt.Fprintf(os.Stderr, "Unrecognized elliptic curve: %q", *ecdsaCurve)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to generate private key: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var notBefore time.Time
|
||||||
|
if len(*validFrom) == 0 {
|
||||||
|
notBefore = time.Now()
|
||||||
|
} else {
|
||||||
|
notBefore, err = time.Parse("Jan 2 15:04:05 2006", *validFrom)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Failed to parse creation date: %s\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
notAfter := notBefore.Add(*validFor)
|
||||||
|
|
||||||
|
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||||
|
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to generate serial number: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
template := x509.Certificate{
|
||||||
|
SerialNumber: serialNumber,
|
||||||
|
Subject: pkix.Name{
|
||||||
|
Organization: []string{"Acme Co"},
|
||||||
|
},
|
||||||
|
NotBefore: notBefore,
|
||||||
|
NotAfter: notAfter,
|
||||||
|
|
||||||
|
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
||||||
|
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||||
|
BasicConstraintsValid: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
hosts := strings.Split(*host, ",")
|
||||||
|
for _, h := range hosts {
|
||||||
|
if ip := net.ParseIP(h); ip != nil {
|
||||||
|
template.IPAddresses = append(template.IPAddresses, ip)
|
||||||
|
} else {
|
||||||
|
template.DNSNames = append(template.DNSNames, h)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if *isCA {
|
||||||
|
template.IsCA = true
|
||||||
|
template.KeyUsage |= x509.KeyUsageCertSign
|
||||||
|
}
|
||||||
|
|
||||||
|
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to create certificate: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
certOut, err := os.Create("cert.pem")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to open cert.pem for writing: %s", err)
|
||||||
|
}
|
||||||
|
if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
|
||||||
|
log.Fatalf("failed to write data to cert.pem: %s", err)
|
||||||
|
}
|
||||||
|
if err := certOut.Close(); err != nil {
|
||||||
|
log.Fatalf("error closing cert.pem: %s", err)
|
||||||
|
}
|
||||||
|
log.Print("wrote cert.pem\n")
|
||||||
|
|
||||||
|
keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||||
|
if err != nil {
|
||||||
|
log.Print("failed to open key.pem for writing:", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := pem.Encode(keyOut, pemBlockForKey(priv)); err != nil {
|
||||||
|
log.Fatalf("failed to write data to key.pem: %s", err)
|
||||||
|
}
|
||||||
|
if err := keyOut.Close(); err != nil {
|
||||||
|
log.Fatalf("error closing key.pem: %s", err)
|
||||||
|
}
|
||||||
|
log.Print("wrote key.pem\n")
|
||||||
|
}
|
||||||
144
vendor/github.com/miekg/dns/duplicate_generate.go
generated
vendored
Normal file
144
vendor/github.com/miekg/dns/duplicate_generate.go
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
//+build ignore
|
||||||
|
|
||||||
|
// types_generate.go is meant to run with go generate. It will use
|
||||||
|
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||||
|
// it will generate conversion tables (TypeToRR and TypeToString) and banal
|
||||||
|
// methods (len, Header, copy) based on the struct tags. The generated source is
|
||||||
|
// written to ztypes.go, and is meant to be checked into git.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/format"
|
||||||
|
"go/importer"
|
||||||
|
"go/types"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
var packageHdr = `
|
||||||
|
// Code generated by "go run duplicate_generate.go"; DO NOT EDIT.
|
||||||
|
|
||||||
|
package dns
|
||||||
|
|
||||||
|
`
|
||||||
|
|
||||||
|
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||||
|
st, ok := t.Underlying().(*types.Struct)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||||
|
return st, false
|
||||||
|
}
|
||||||
|
if st.Field(0).Anonymous() {
|
||||||
|
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||||
|
return st, true
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Import and type-check the package
|
||||||
|
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||||
|
fatalIfErr(err)
|
||||||
|
scope := pkg.Scope()
|
||||||
|
|
||||||
|
// Collect actual types (*X)
|
||||||
|
var namedTypes []string
|
||||||
|
for _, name := range scope.Names() {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
if o == nil || !o.Exported() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if name == "PrivateRR" || name == "OPT" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
namedTypes = append(namedTypes, o.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
b := &bytes.Buffer{}
|
||||||
|
b.WriteString(packageHdr)
|
||||||
|
|
||||||
|
// Generate the duplicate check for each type.
|
||||||
|
fmt.Fprint(b, "// isDuplicate() functions\n\n")
|
||||||
|
for _, name := range namedTypes {
|
||||||
|
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
st, isEmbedded := getTypeStruct(o.Type(), scope)
|
||||||
|
if isEmbedded {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "func (r1 *%s) isDuplicate(_r2 RR) bool {\n", name)
|
||||||
|
fmt.Fprintf(b, "r2, ok := _r2.(*%s)\n", name)
|
||||||
|
fmt.Fprint(b, "if !ok { return false }\n")
|
||||||
|
fmt.Fprint(b, "_ = r2\n")
|
||||||
|
for i := 1; i < st.NumFields(); i++ {
|
||||||
|
field := st.Field(i).Name()
|
||||||
|
o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) }
|
||||||
|
o3 := func(s string) { fmt.Fprintf(b, s+"\n", field, field, field) }
|
||||||
|
|
||||||
|
// For some reason, a and aaaa don't pop up as *types.Slice here (mostly like because the are
|
||||||
|
// *indirectly* defined as a slice in the net package).
|
||||||
|
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||||
|
o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}")
|
||||||
|
|
||||||
|
if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` {
|
||||||
|
o3(`for i := 0; i < len(r1.%s); i++ {
|
||||||
|
if !isDuplicateName(r1.%s[i], r2.%s[i]) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}`)
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
o3(`for i := 0; i < len(r1.%s); i++ {
|
||||||
|
if r1.%s[i] != r2.%s[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}`)
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch st.Tag(i) {
|
||||||
|
case `dns:"-"`:
|
||||||
|
// ignored
|
||||||
|
case `dns:"a"`, `dns:"aaaa"`:
|
||||||
|
o2("if !r1.%s.Equal(r2.%s) {\nreturn false\n}")
|
||||||
|
case `dns:"cdomain-name"`, `dns:"domain-name"`:
|
||||||
|
o2("if !isDuplicateName(r1.%s, r2.%s) {\nreturn false\n}")
|
||||||
|
default:
|
||||||
|
o2("if r1.%s != r2.%s {\nreturn false\n}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "return true\n}\n\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// gofmt
|
||||||
|
res, err := format.Source(b.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
b.WriteTo(os.Stderr)
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// write result
|
||||||
|
f, err := os.Create("zduplicate.go")
|
||||||
|
fatalIfErr(err)
|
||||||
|
defer f.Close()
|
||||||
|
f.Write(res)
|
||||||
|
}
|
||||||
|
|
||||||
|
func fatalIfErr(err error) {
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
328
vendor/github.com/miekg/dns/msg_generate.go
generated
vendored
Normal file
328
vendor/github.com/miekg/dns/msg_generate.go
generated
vendored
Normal file
@@ -0,0 +1,328 @@
|
|||||||
|
//+build ignore
|
||||||
|
|
||||||
|
// msg_generate.go is meant to run with go generate. It will use
|
||||||
|
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||||
|
// it will generate pack/unpack methods based on the struct tags. The generated source is
|
||||||
|
// written to zmsg.go, and is meant to be checked into git.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/format"
|
||||||
|
"go/importer"
|
||||||
|
"go/types"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var packageHdr = `
|
||||||
|
// Code generated by "go run msg_generate.go"; DO NOT EDIT.
|
||||||
|
|
||||||
|
package dns
|
||||||
|
|
||||||
|
`
|
||||||
|
|
||||||
|
// getTypeStruct will take a type and the package scope, and return the
|
||||||
|
// (innermost) struct if the type is considered a RR type (currently defined as
|
||||||
|
// those structs beginning with a RR_Header, could be redefined as implementing
|
||||||
|
// the RR interface). The bool return value indicates if embedded structs were
|
||||||
|
// resolved.
|
||||||
|
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||||
|
st, ok := t.Underlying().(*types.Struct)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||||
|
return st, false
|
||||||
|
}
|
||||||
|
if st.Field(0).Anonymous() {
|
||||||
|
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||||
|
return st, true
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Import and type-check the package
|
||||||
|
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||||
|
fatalIfErr(err)
|
||||||
|
scope := pkg.Scope()
|
||||||
|
|
||||||
|
// Collect actual types (*X)
|
||||||
|
var namedTypes []string
|
||||||
|
for _, name := range scope.Names() {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
if o == nil || !o.Exported() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if name == "PrivateRR" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if corresponding TypeX exists
|
||||||
|
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
|
||||||
|
log.Fatalf("Constant Type%s does not exist.", o.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
namedTypes = append(namedTypes, o.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
b := &bytes.Buffer{}
|
||||||
|
b.WriteString(packageHdr)
|
||||||
|
|
||||||
|
fmt.Fprint(b, "// pack*() functions\n\n")
|
||||||
|
for _, name := range namedTypes {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
st, _ := getTypeStruct(o.Type(), scope)
|
||||||
|
|
||||||
|
fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {\n", name)
|
||||||
|
for i := 1; i < st.NumFields(); i++ {
|
||||||
|
o := func(s string) {
|
||||||
|
fmt.Fprintf(b, s, st.Field(i).Name())
|
||||||
|
fmt.Fprint(b, `if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||||
|
switch st.Tag(i) {
|
||||||
|
case `dns:"-"`: // ignored
|
||||||
|
case `dns:"txt"`:
|
||||||
|
o("off, err = packStringTxt(rr.%s, msg, off)\n")
|
||||||
|
case `dns:"opt"`:
|
||||||
|
o("off, err = packDataOpt(rr.%s, msg, off)\n")
|
||||||
|
case `dns:"nsec"`:
|
||||||
|
o("off, err = packDataNsec(rr.%s, msg, off)\n")
|
||||||
|
case `dns:"domain-name"`:
|
||||||
|
o("off, err = packDataDomainNames(rr.%s, msg, off, compression, false)\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case st.Tag(i) == `dns:"-"`: // ignored
|
||||||
|
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||||
|
o("off, err = packDomainName(rr.%s, msg, off, compression, compress)\n")
|
||||||
|
case st.Tag(i) == `dns:"domain-name"`:
|
||||||
|
o("off, err = packDomainName(rr.%s, msg, off, compression, false)\n")
|
||||||
|
case st.Tag(i) == `dns:"a"`:
|
||||||
|
o("off, err = packDataA(rr.%s, msg, off)\n")
|
||||||
|
case st.Tag(i) == `dns:"aaaa"`:
|
||||||
|
o("off, err = packDataAAAA(rr.%s, msg, off)\n")
|
||||||
|
case st.Tag(i) == `dns:"uint48"`:
|
||||||
|
o("off, err = packUint48(rr.%s, msg, off)\n")
|
||||||
|
case st.Tag(i) == `dns:"txt"`:
|
||||||
|
o("off, err = packString(rr.%s, msg, off)\n")
|
||||||
|
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32
|
||||||
|
fallthrough
|
||||||
|
case st.Tag(i) == `dns:"base32"`:
|
||||||
|
o("off, err = packStringBase32(rr.%s, msg, off)\n")
|
||||||
|
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64
|
||||||
|
fallthrough
|
||||||
|
case st.Tag(i) == `dns:"base64"`:
|
||||||
|
o("off, err = packStringBase64(rr.%s, msg, off)\n")
|
||||||
|
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`):
|
||||||
|
// directly write instead of using o() so we get the error check in the correct place
|
||||||
|
field := st.Field(i).Name()
|
||||||
|
fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty
|
||||||
|
if rr.%s != "-" {
|
||||||
|
off, err = packStringHex(rr.%s, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, field, field)
|
||||||
|
continue
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex
|
||||||
|
fallthrough
|
||||||
|
case st.Tag(i) == `dns:"hex"`:
|
||||||
|
o("off, err = packStringHex(rr.%s, msg, off)\n")
|
||||||
|
case st.Tag(i) == `dns:"any"`:
|
||||||
|
o("off, err = packStringAny(rr.%s, msg, off)\n")
|
||||||
|
case st.Tag(i) == `dns:"octet"`:
|
||||||
|
o("off, err = packStringOctet(rr.%s, msg, off)\n")
|
||||||
|
case st.Tag(i) == "":
|
||||||
|
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||||
|
case types.Uint8:
|
||||||
|
o("off, err = packUint8(rr.%s, msg, off)\n")
|
||||||
|
case types.Uint16:
|
||||||
|
o("off, err = packUint16(rr.%s, msg, off)\n")
|
||||||
|
case types.Uint32:
|
||||||
|
o("off, err = packUint32(rr.%s, msg, off)\n")
|
||||||
|
case types.Uint64:
|
||||||
|
o("off, err = packUint64(rr.%s, msg, off)\n")
|
||||||
|
case types.String:
|
||||||
|
o("off, err = packString(rr.%s, msg, off)\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name())
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintln(b, "return off, nil }\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprint(b, "// unpack*() functions\n\n")
|
||||||
|
for _, name := range namedTypes {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
st, _ := getTypeStruct(o.Type(), scope)
|
||||||
|
|
||||||
|
fmt.Fprintf(b, "func (rr *%s) unpack(msg []byte, off int) (off1 int, err error) {\n", name)
|
||||||
|
fmt.Fprint(b, `rdStart := off
|
||||||
|
_ = rdStart
|
||||||
|
|
||||||
|
`)
|
||||||
|
for i := 1; i < st.NumFields(); i++ {
|
||||||
|
o := func(s string) {
|
||||||
|
fmt.Fprintf(b, s, st.Field(i).Name())
|
||||||
|
fmt.Fprint(b, `if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// size-* are special, because they reference a struct member we should use for the length.
|
||||||
|
if strings.HasPrefix(st.Tag(i), `dns:"size-`) {
|
||||||
|
structMember := structMember(st.Tag(i))
|
||||||
|
structTag := structTag(st.Tag(i))
|
||||||
|
switch structTag {
|
||||||
|
case "hex":
|
||||||
|
fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||||
|
case "base32":
|
||||||
|
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||||
|
case "base64":
|
||||||
|
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
fmt.Fprint(b, `if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||||
|
switch st.Tag(i) {
|
||||||
|
case `dns:"-"`: // ignored
|
||||||
|
case `dns:"txt"`:
|
||||||
|
o("rr.%s, off, err = unpackStringTxt(msg, off)\n")
|
||||||
|
case `dns:"opt"`:
|
||||||
|
o("rr.%s, off, err = unpackDataOpt(msg, off)\n")
|
||||||
|
case `dns:"nsec"`:
|
||||||
|
o("rr.%s, off, err = unpackDataNsec(msg, off)\n")
|
||||||
|
case `dns:"domain-name"`:
|
||||||
|
o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch st.Tag(i) {
|
||||||
|
case `dns:"-"`: // ignored
|
||||||
|
case `dns:"cdomain-name"`:
|
||||||
|
fallthrough
|
||||||
|
case `dns:"domain-name"`:
|
||||||
|
o("rr.%s, off, err = UnpackDomainName(msg, off)\n")
|
||||||
|
case `dns:"a"`:
|
||||||
|
o("rr.%s, off, err = unpackDataA(msg, off)\n")
|
||||||
|
case `dns:"aaaa"`:
|
||||||
|
o("rr.%s, off, err = unpackDataAAAA(msg, off)\n")
|
||||||
|
case `dns:"uint48"`:
|
||||||
|
o("rr.%s, off, err = unpackUint48(msg, off)\n")
|
||||||
|
case `dns:"txt"`:
|
||||||
|
o("rr.%s, off, err = unpackString(msg, off)\n")
|
||||||
|
case `dns:"base32"`:
|
||||||
|
o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||||
|
case `dns:"base64"`:
|
||||||
|
o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||||
|
case `dns:"hex"`:
|
||||||
|
o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||||
|
case `dns:"any"`:
|
||||||
|
o("rr.%s, off, err = unpackStringAny(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||||
|
case `dns:"octet"`:
|
||||||
|
o("rr.%s, off, err = unpackStringOctet(msg, off)\n")
|
||||||
|
case "":
|
||||||
|
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||||
|
case types.Uint8:
|
||||||
|
o("rr.%s, off, err = unpackUint8(msg, off)\n")
|
||||||
|
case types.Uint16:
|
||||||
|
o("rr.%s, off, err = unpackUint16(msg, off)\n")
|
||||||
|
case types.Uint32:
|
||||||
|
o("rr.%s, off, err = unpackUint32(msg, off)\n")
|
||||||
|
case types.Uint64:
|
||||||
|
o("rr.%s, off, err = unpackUint64(msg, off)\n")
|
||||||
|
case types.String:
|
||||||
|
o("rr.%s, off, err = unpackString(msg, off)\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name())
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
// If we've hit len(msg) we return without error.
|
||||||
|
if i < st.NumFields()-1 {
|
||||||
|
fmt.Fprintf(b, `if off == len(msg) {
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "return off, nil }\n\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// gofmt
|
||||||
|
res, err := format.Source(b.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
b.WriteTo(os.Stderr)
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// write result
|
||||||
|
f, err := os.Create("zmsg.go")
|
||||||
|
fatalIfErr(err)
|
||||||
|
defer f.Close()
|
||||||
|
f.Write(res)
|
||||||
|
}
|
||||||
|
|
||||||
|
// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string.
|
||||||
|
func structMember(s string) string {
|
||||||
|
fields := strings.Split(s, ":")
|
||||||
|
if len(fields) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
f := fields[len(fields)-1]
|
||||||
|
// f should have a closing "
|
||||||
|
if len(f) > 1 {
|
||||||
|
return f[:len(f)-1]
|
||||||
|
}
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// structTag will take a tag like dns:"size-base32:SaltLength" and return base32.
|
||||||
|
func structTag(s string) string {
|
||||||
|
fields := strings.Split(s, ":")
|
||||||
|
if len(fields) < 2 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return fields[1][len("\"size-"):]
|
||||||
|
}
|
||||||
|
|
||||||
|
func fatalIfErr(err error) {
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
287
vendor/github.com/miekg/dns/types_generate.go
generated
vendored
Normal file
287
vendor/github.com/miekg/dns/types_generate.go
generated
vendored
Normal file
@@ -0,0 +1,287 @@
|
|||||||
|
//+build ignore
|
||||||
|
|
||||||
|
// types_generate.go is meant to run with go generate. It will use
|
||||||
|
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||||
|
// it will generate conversion tables (TypeToRR and TypeToString) and banal
|
||||||
|
// methods (len, Header, copy) based on the struct tags. The generated source is
|
||||||
|
// written to ztypes.go, and is meant to be checked into git.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/format"
|
||||||
|
"go/importer"
|
||||||
|
"go/types"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"text/template"
|
||||||
|
)
|
||||||
|
|
||||||
|
var skipLen = map[string]struct{}{
|
||||||
|
"NSEC": {},
|
||||||
|
"NSEC3": {},
|
||||||
|
"OPT": {},
|
||||||
|
"CSYNC": {},
|
||||||
|
}
|
||||||
|
|
||||||
|
var packageHdr = `
|
||||||
|
// Code generated by "go run types_generate.go"; DO NOT EDIT.
|
||||||
|
|
||||||
|
package dns
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"net"
|
||||||
|
)
|
||||||
|
|
||||||
|
`
|
||||||
|
|
||||||
|
var TypeToRR = template.Must(template.New("TypeToRR").Parse(`
|
||||||
|
// TypeToRR is a map of constructors for each RR type.
|
||||||
|
var TypeToRR = map[uint16]func() RR{
|
||||||
|
{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) },
|
||||||
|
{{end}}{{end}} }
|
||||||
|
|
||||||
|
`))
|
||||||
|
|
||||||
|
var typeToString = template.Must(template.New("typeToString").Parse(`
|
||||||
|
// TypeToString is a map of strings for each RR type.
|
||||||
|
var TypeToString = map[uint16]string{
|
||||||
|
{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}",
|
||||||
|
{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR",
|
||||||
|
}
|
||||||
|
|
||||||
|
`))
|
||||||
|
|
||||||
|
var headerFunc = template.Must(template.New("headerFunc").Parse(`
|
||||||
|
{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
`))
|
||||||
|
|
||||||
|
// getTypeStruct will take a type and the package scope, and return the
|
||||||
|
// (innermost) struct if the type is considered a RR type (currently defined as
|
||||||
|
// those structs beginning with a RR_Header, could be redefined as implementing
|
||||||
|
// the RR interface). The bool return value indicates if embedded structs were
|
||||||
|
// resolved.
|
||||||
|
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||||
|
st, ok := t.Underlying().(*types.Struct)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||||
|
return st, false
|
||||||
|
}
|
||||||
|
if st.Field(0).Anonymous() {
|
||||||
|
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||||
|
return st, true
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Import and type-check the package
|
||||||
|
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||||
|
fatalIfErr(err)
|
||||||
|
scope := pkg.Scope()
|
||||||
|
|
||||||
|
// Collect constants like TypeX
|
||||||
|
var numberedTypes []string
|
||||||
|
for _, name := range scope.Names() {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
if o == nil || !o.Exported() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
b, ok := o.Type().(*types.Basic)
|
||||||
|
if !ok || b.Kind() != types.Uint16 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !strings.HasPrefix(o.Name(), "Type") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name := strings.TrimPrefix(o.Name(), "Type")
|
||||||
|
if name == "PrivateRR" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
numberedTypes = append(numberedTypes, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect actual types (*X)
|
||||||
|
var namedTypes []string
|
||||||
|
for _, name := range scope.Names() {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
if o == nil || !o.Exported() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if name == "PrivateRR" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if corresponding TypeX exists
|
||||||
|
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
|
||||||
|
log.Fatalf("Constant Type%s does not exist.", o.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
namedTypes = append(namedTypes, o.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
b := &bytes.Buffer{}
|
||||||
|
b.WriteString(packageHdr)
|
||||||
|
|
||||||
|
// Generate TypeToRR
|
||||||
|
fatalIfErr(TypeToRR.Execute(b, namedTypes))
|
||||||
|
|
||||||
|
// Generate typeToString
|
||||||
|
fatalIfErr(typeToString.Execute(b, numberedTypes))
|
||||||
|
|
||||||
|
// Generate headerFunc
|
||||||
|
fatalIfErr(headerFunc.Execute(b, namedTypes))
|
||||||
|
|
||||||
|
// Generate len()
|
||||||
|
fmt.Fprint(b, "// len() functions\n")
|
||||||
|
for _, name := range namedTypes {
|
||||||
|
if _, ok := skipLen[name]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
st, isEmbedded := getTypeStruct(o.Type(), scope)
|
||||||
|
if isEmbedded {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "func (rr *%s) len(off int, compression map[string]struct{}) int {\n", name)
|
||||||
|
fmt.Fprintf(b, "l := rr.Hdr.len(off, compression)\n")
|
||||||
|
for i := 1; i < st.NumFields(); i++ {
|
||||||
|
o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) }
|
||||||
|
|
||||||
|
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||||
|
switch st.Tag(i) {
|
||||||
|
case `dns:"-"`:
|
||||||
|
// ignored
|
||||||
|
case `dns:"cdomain-name"`:
|
||||||
|
o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, true) }\n")
|
||||||
|
case `dns:"domain-name"`:
|
||||||
|
o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, false) }\n")
|
||||||
|
case `dns:"txt"`:
|
||||||
|
o("for _, x := range rr.%s { l += len(x) + 1 }\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case st.Tag(i) == `dns:"-"`:
|
||||||
|
// ignored
|
||||||
|
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||||
|
o("l += domainNameLen(rr.%s, off+l, compression, true)\n")
|
||||||
|
case st.Tag(i) == `dns:"domain-name"`:
|
||||||
|
o("l += domainNameLen(rr.%s, off+l, compression, false)\n")
|
||||||
|
case st.Tag(i) == `dns:"octet"`:
|
||||||
|
o("l += len(rr.%s)\n")
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`):
|
||||||
|
fallthrough
|
||||||
|
case st.Tag(i) == `dns:"base64"`:
|
||||||
|
o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored
|
||||||
|
o("l += len(rr.%s)/2\n")
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`):
|
||||||
|
fallthrough
|
||||||
|
case st.Tag(i) == `dns:"hex"`:
|
||||||
|
o("l += len(rr.%s)/2 + 1\n")
|
||||||
|
case st.Tag(i) == `dns:"any"`:
|
||||||
|
o("l += len(rr.%s)\n")
|
||||||
|
case st.Tag(i) == `dns:"a"`:
|
||||||
|
o("if len(rr.%s) != 0 { l += net.IPv4len }\n")
|
||||||
|
case st.Tag(i) == `dns:"aaaa"`:
|
||||||
|
o("if len(rr.%s) != 0 { l += net.IPv6len }\n")
|
||||||
|
case st.Tag(i) == `dns:"txt"`:
|
||||||
|
o("for _, t := range rr.%s { l += len(t) + 1 }\n")
|
||||||
|
case st.Tag(i) == `dns:"uint48"`:
|
||||||
|
o("l += 6 // %s\n")
|
||||||
|
case st.Tag(i) == "":
|
||||||
|
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||||
|
case types.Uint8:
|
||||||
|
o("l++ // %s\n")
|
||||||
|
case types.Uint16:
|
||||||
|
o("l += 2 // %s\n")
|
||||||
|
case types.Uint32:
|
||||||
|
o("l += 4 // %s\n")
|
||||||
|
case types.Uint64:
|
||||||
|
o("l += 8 // %s\n")
|
||||||
|
case types.String:
|
||||||
|
o("l += len(rr.%s) + 1\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name())
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "return l }\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate copy()
|
||||||
|
fmt.Fprint(b, "// copy() functions\n")
|
||||||
|
for _, name := range namedTypes {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
st, isEmbedded := getTypeStruct(o.Type(), scope)
|
||||||
|
if isEmbedded {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name)
|
||||||
|
fields := []string{"rr.Hdr"}
|
||||||
|
for i := 1; i < st.NumFields(); i++ {
|
||||||
|
f := st.Field(i).Name()
|
||||||
|
if sl, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||||
|
t := sl.Underlying().String()
|
||||||
|
t = strings.TrimPrefix(t, "[]")
|
||||||
|
if strings.Contains(t, ".") {
|
||||||
|
splits := strings.Split(t, ".")
|
||||||
|
t = splits[len(splits)-1]
|
||||||
|
}
|
||||||
|
// For the EDNS0 interface (used in the OPT RR), we need to call the copy method on each element.
|
||||||
|
if t == "EDNS0" {
|
||||||
|
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s));\nfor i,e := range rr.%s {\n %s[i] = e.copy()\n}\n",
|
||||||
|
f, t, f, f, f)
|
||||||
|
fields = append(fields, f)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n",
|
||||||
|
f, t, f, f, f)
|
||||||
|
fields = append(fields, f)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if st.Field(i).Type().String() == "net.IP" {
|
||||||
|
fields = append(fields, "copyIP(rr."+f+")")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fields = append(fields, "rr."+f)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ","))
|
||||||
|
fmt.Fprintf(b, "}\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// gofmt
|
||||||
|
res, err := format.Source(b.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
b.WriteTo(os.Stderr)
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// write result
|
||||||
|
f, err := os.Create("ztypes.go")
|
||||||
|
fatalIfErr(err)
|
||||||
|
defer f.Close()
|
||||||
|
f.Write(res)
|
||||||
|
}
|
||||||
|
|
||||||
|
func fatalIfErr(err error) {
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
21
vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
generated
vendored
21
vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
generated
vendored
@@ -1,21 +0,0 @@
|
|||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2014 Mitchell Hashimoto
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
|
||||||
all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
THE SOFTWARE.
|
|
||||||
39
vendor/github.com/mitchellh/go-wordwrap/README.md
generated
vendored
39
vendor/github.com/mitchellh/go-wordwrap/README.md
generated
vendored
@@ -1,39 +0,0 @@
|
|||||||
# go-wordwrap
|
|
||||||
|
|
||||||
`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that
|
|
||||||
automatically wraps words into multiple lines. The primary use case for this
|
|
||||||
is in formatting CLI output, but of course word wrapping is a generally useful
|
|
||||||
thing to do.
|
|
||||||
|
|
||||||
## Installation and Usage
|
|
||||||
|
|
||||||
Install using `go get github.com/mitchellh/go-wordwrap`.
|
|
||||||
|
|
||||||
Full documentation is available at
|
|
||||||
http://godoc.org/github.com/mitchellh/go-wordwrap
|
|
||||||
|
|
||||||
Below is an example of its usage ignoring errors:
|
|
||||||
|
|
||||||
```go
|
|
||||||
wrapped := wordwrap.WrapString("foo bar baz", 3)
|
|
||||||
fmt.Println(wrapped)
|
|
||||||
```
|
|
||||||
|
|
||||||
Would output:
|
|
||||||
|
|
||||||
```
|
|
||||||
foo
|
|
||||||
bar
|
|
||||||
baz
|
|
||||||
```
|
|
||||||
|
|
||||||
## Word Wrap Algorithm
|
|
||||||
|
|
||||||
This library doesn't use any clever algorithm for word wrapping. The wrapping
|
|
||||||
is actually very naive: whenever there is whitespace or an explicit linebreak.
|
|
||||||
The goal of this library is for word wrapping CLI output, so the input is
|
|
||||||
typically pretty well controlled human language. Because of this, the naive
|
|
||||||
approach typically works just fine.
|
|
||||||
|
|
||||||
In the future, we'd like to make the algorithm more advanced. We would do
|
|
||||||
so without breaking the API.
|
|
||||||
1
vendor/github.com/mitchellh/go-wordwrap/go.mod
generated
vendored
1
vendor/github.com/mitchellh/go-wordwrap/go.mod
generated
vendored
@@ -1 +0,0 @@
|
|||||||
module github.com/mitchellh/go-wordwrap
|
|
||||||
73
vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
generated
vendored
73
vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
generated
vendored
@@ -1,73 +0,0 @@
|
|||||||
package wordwrap
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WrapString wraps the given string within lim width in characters.
|
|
||||||
//
|
|
||||||
// Wrapping is currently naive and only happens at white-space. A future
|
|
||||||
// version of the library will implement smarter wrapping. This means that
|
|
||||||
// pathological cases can dramatically reach past the limit, such as a very
|
|
||||||
// long word.
|
|
||||||
func WrapString(s string, lim uint) string {
|
|
||||||
// Initialize a buffer with a slightly larger size to account for breaks
|
|
||||||
init := make([]byte, 0, len(s))
|
|
||||||
buf := bytes.NewBuffer(init)
|
|
||||||
|
|
||||||
var current uint
|
|
||||||
var wordBuf, spaceBuf bytes.Buffer
|
|
||||||
|
|
||||||
for _, char := range s {
|
|
||||||
if char == '\n' {
|
|
||||||
if wordBuf.Len() == 0 {
|
|
||||||
if current+uint(spaceBuf.Len()) > lim {
|
|
||||||
current = 0
|
|
||||||
} else {
|
|
||||||
current += uint(spaceBuf.Len())
|
|
||||||
spaceBuf.WriteTo(buf)
|
|
||||||
}
|
|
||||||
spaceBuf.Reset()
|
|
||||||
} else {
|
|
||||||
current += uint(spaceBuf.Len() + wordBuf.Len())
|
|
||||||
spaceBuf.WriteTo(buf)
|
|
||||||
spaceBuf.Reset()
|
|
||||||
wordBuf.WriteTo(buf)
|
|
||||||
wordBuf.Reset()
|
|
||||||
}
|
|
||||||
buf.WriteRune(char)
|
|
||||||
current = 0
|
|
||||||
} else if unicode.IsSpace(char) {
|
|
||||||
if spaceBuf.Len() == 0 || wordBuf.Len() > 0 {
|
|
||||||
current += uint(spaceBuf.Len() + wordBuf.Len())
|
|
||||||
spaceBuf.WriteTo(buf)
|
|
||||||
spaceBuf.Reset()
|
|
||||||
wordBuf.WriteTo(buf)
|
|
||||||
wordBuf.Reset()
|
|
||||||
}
|
|
||||||
|
|
||||||
spaceBuf.WriteRune(char)
|
|
||||||
} else {
|
|
||||||
|
|
||||||
wordBuf.WriteRune(char)
|
|
||||||
|
|
||||||
if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim {
|
|
||||||
buf.WriteRune('\n')
|
|
||||||
current = 0
|
|
||||||
spaceBuf.Reset()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if wordBuf.Len() == 0 {
|
|
||||||
if current+uint(spaceBuf.Len()) <= lim {
|
|
||||||
spaceBuf.WriteTo(buf)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
spaceBuf.WriteTo(buf)
|
|
||||||
wordBuf.WriteTo(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
19
vendor/github.com/peterbourgon/diskv/LICENSE
generated
vendored
19
vendor/github.com/peterbourgon/diskv/LICENSE
generated
vendored
@@ -1,19 +0,0 @@
|
|||||||
Copyright (c) 2011-2012 Peter Bourgon
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
|
||||||
all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
THE SOFTWARE.
|
|
||||||
141
vendor/github.com/peterbourgon/diskv/README.md
generated
vendored
141
vendor/github.com/peterbourgon/diskv/README.md
generated
vendored
@@ -1,141 +0,0 @@
|
|||||||
# What is diskv?
|
|
||||||
|
|
||||||
Diskv (disk-vee) is a simple, persistent key-value store written in the Go
|
|
||||||
language. It starts with an incredibly simple API for storing arbitrary data on
|
|
||||||
a filesystem by key, and builds several layers of performance-enhancing
|
|
||||||
abstraction on top. The end result is a conceptually simple, but highly
|
|
||||||
performant, disk-backed storage system.
|
|
||||||
|
|
||||||
[![Build Status][1]][2]
|
|
||||||
|
|
||||||
[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
|
|
||||||
[2]: https://drone.io/github.com/peterbourgon/diskv/latest
|
|
||||||
|
|
||||||
|
|
||||||
# Installing
|
|
||||||
|
|
||||||
Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
|
|
||||||
Then,
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ go get github.com/peterbourgon/diskv
|
|
||||||
```
|
|
||||||
|
|
||||||
[3]: http://golang.org
|
|
||||||
[4]: http://golang.org/doc/install/source
|
|
||||||
[5]: http://golang.org/doc/install
|
|
||||||
|
|
||||||
|
|
||||||
# Usage
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/peterbourgon/diskv"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// Simplest transform function: put all the data files into the base dir.
|
|
||||||
flatTransform := func(s string) []string { return []string{} }
|
|
||||||
|
|
||||||
// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
|
|
||||||
d := diskv.New(diskv.Options{
|
|
||||||
BasePath: "my-data-dir",
|
|
||||||
Transform: flatTransform,
|
|
||||||
CacheSizeMax: 1024 * 1024,
|
|
||||||
})
|
|
||||||
|
|
||||||
// Write three bytes to the key "alpha".
|
|
||||||
key := "alpha"
|
|
||||||
d.Write(key, []byte{'1', '2', '3'})
|
|
||||||
|
|
||||||
// Read the value back out of the store.
|
|
||||||
value, _ := d.Read(key)
|
|
||||||
fmt.Printf("%v\n", value)
|
|
||||||
|
|
||||||
// Erase the key+value from the store (and the disk).
|
|
||||||
d.Erase(key)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
More complex examples can be found in the "examples" subdirectory.
|
|
||||||
|
|
||||||
|
|
||||||
# Theory
|
|
||||||
|
|
||||||
## Basic idea
|
|
||||||
|
|
||||||
At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
|
|
||||||
The data is written to a single file on disk, with the same name as the key.
|
|
||||||
The key determines where that file will be stored, via a user-provided
|
|
||||||
`TransformFunc`, which takes a key and returns a slice (`[]string`)
|
|
||||||
corresponding to a path list where the key file will be stored. The simplest
|
|
||||||
TransformFunc,
|
|
||||||
|
|
||||||
```go
|
|
||||||
func SimpleTransform (key string) []string {
|
|
||||||
return []string{}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
will place all keys in the same, base directory. The design is inspired by
|
|
||||||
[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
|
|
||||||
behavior is available in the content-addressable-storage example.
|
|
||||||
|
|
||||||
[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
|
|
||||||
|
|
||||||
**Note** that your TransformFunc should ensure that one valid key doesn't
|
|
||||||
transform to a subset of another valid key. That is, it shouldn't be possible
|
|
||||||
to construct valid keys that resolve to directory names. As a concrete example,
|
|
||||||
if your TransformFunc splits on every 3 characters, then
|
|
||||||
|
|
||||||
```go
|
|
||||||
d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
|
|
||||||
d.Write("abc", val) // Error: attempted write to <base>/abc/abc, but it's a directory
|
|
||||||
```
|
|
||||||
|
|
||||||
This will be addressed in an upcoming version of diskv.
|
|
||||||
|
|
||||||
Probably the most important design principle behind diskv is that your data is
|
|
||||||
always flatly available on the disk. diskv will never do anything that would
|
|
||||||
prevent you from accessing, copying, backing up, or otherwise interacting with
|
|
||||||
your data via common UNIX commandline tools.
|
|
||||||
|
|
||||||
## Adding a cache
|
|
||||||
|
|
||||||
An in-memory caching layer is provided by combining the BasicStore
|
|
||||||
functionality with a simple map structure, and keeping it up-to-date as
|
|
||||||
appropriate. Since the map structure in Go is not threadsafe, it's combined
|
|
||||||
with a RWMutex to provide safe concurrent access.
|
|
||||||
|
|
||||||
## Adding order
|
|
||||||
|
|
||||||
diskv is a key-value store and therefore inherently unordered. An ordering
|
|
||||||
system can be injected into the store by passing something which satisfies the
|
|
||||||
diskv.Index interface. (A default implementation, using Google's
|
|
||||||
[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
|
|
||||||
user-provided Less function) index of the keys, which can be queried.
|
|
||||||
|
|
||||||
[7]: https://github.com/google/btree
|
|
||||||
|
|
||||||
## Adding compression
|
|
||||||
|
|
||||||
Something which implements the diskv.Compression interface may be passed
|
|
||||||
during store creation, so that all Writes and Reads are filtered through
|
|
||||||
a compression/decompression pipeline. Several default implementations,
|
|
||||||
using stdlib compression algorithms, are provided. Note that data is cached
|
|
||||||
compressed; the cost of decompression is borne with each Read.
|
|
||||||
|
|
||||||
## Streaming
|
|
||||||
|
|
||||||
diskv also now provides ReadStream and WriteStream methods, to allow very large
|
|
||||||
data to be handled efficiently.
|
|
||||||
|
|
||||||
|
|
||||||
# Future plans
|
|
||||||
|
|
||||||
* Needs plenty of robust testing: huge datasets, etc...
|
|
||||||
* More thorough benchmarking
|
|
||||||
* Your suggestions for use-cases I haven't thought of
|
|
||||||
64
vendor/github.com/peterbourgon/diskv/compression.go
generated
vendored
64
vendor/github.com/peterbourgon/diskv/compression.go
generated
vendored
@@ -1,64 +0,0 @@
|
|||||||
package diskv
|
|
||||||
|
|
||||||
import (
|
|
||||||
"compress/flate"
|
|
||||||
"compress/gzip"
|
|
||||||
"compress/zlib"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Compression is an interface that Diskv uses to implement compression of
|
|
||||||
// data. Writer takes a destination io.Writer and returns a WriteCloser that
|
|
||||||
// compresses all data written through it. Reader takes a source io.Reader and
|
|
||||||
// returns a ReadCloser that decompresses all data read through it. You may
|
|
||||||
// define these methods on your own type, or use one of the NewCompression
|
|
||||||
// helpers.
|
|
||||||
type Compression interface {
|
|
||||||
Writer(dst io.Writer) (io.WriteCloser, error)
|
|
||||||
Reader(src io.Reader) (io.ReadCloser, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGzipCompression returns a Gzip-based Compression.
|
|
||||||
func NewGzipCompression() Compression {
|
|
||||||
return NewGzipCompressionLevel(flate.DefaultCompression)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
|
|
||||||
func NewGzipCompressionLevel(level int) Compression {
|
|
||||||
return &genericCompression{
|
|
||||||
wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
|
|
||||||
rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewZlibCompression returns a Zlib-based Compression.
|
|
||||||
func NewZlibCompression() Compression {
|
|
||||||
return NewZlibCompressionLevel(flate.DefaultCompression)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
|
|
||||||
func NewZlibCompressionLevel(level int) Compression {
|
|
||||||
return NewZlibCompressionLevelDict(level, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
|
|
||||||
// level, based on the given dictionary.
|
|
||||||
func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
|
|
||||||
return &genericCompression{
|
|
||||||
func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
|
|
||||||
func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type genericCompression struct {
|
|
||||||
wf func(w io.Writer) (io.WriteCloser, error)
|
|
||||||
rf func(r io.Reader) (io.ReadCloser, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
|
|
||||||
return g.wf(dst)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
|
|
||||||
return g.rf(src)
|
|
||||||
}
|
|
||||||
624
vendor/github.com/peterbourgon/diskv/diskv.go
generated
vendored
624
vendor/github.com/peterbourgon/diskv/diskv.go
generated
vendored
@@ -1,624 +0,0 @@
|
|||||||
// Diskv (disk-vee) is a simple, persistent, key-value store.
|
|
||||||
// It stores all data flatly on the filesystem.
|
|
||||||
|
|
||||||
package diskv
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
defaultBasePath = "diskv"
|
|
||||||
defaultFilePerm os.FileMode = 0666
|
|
||||||
defaultPathPerm os.FileMode = 0777
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
defaultTransform = func(s string) []string { return []string{} }
|
|
||||||
errCanceled = errors.New("canceled")
|
|
||||||
errEmptyKey = errors.New("empty key")
|
|
||||||
errBadKey = errors.New("bad key")
|
|
||||||
errImportDirectory = errors.New("can't import a directory")
|
|
||||||
)
|
|
||||||
|
|
||||||
// TransformFunction transforms a key into a slice of strings, with each
|
|
||||||
// element in the slice representing a directory in the file path where the
|
|
||||||
// key's entry will eventually be stored.
|
|
||||||
//
|
|
||||||
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
|
|
||||||
// the final location of the data file will be <basedir>/ab/cde/f/abcdef
|
|
||||||
type TransformFunction func(s string) []string
|
|
||||||
|
|
||||||
// Options define a set of properties that dictate Diskv behavior.
|
|
||||||
// All values are optional.
|
|
||||||
type Options struct {
|
|
||||||
BasePath string
|
|
||||||
Transform TransformFunction
|
|
||||||
CacheSizeMax uint64 // bytes
|
|
||||||
PathPerm os.FileMode
|
|
||||||
FilePerm os.FileMode
|
|
||||||
// If TempDir is set, it will enable filesystem atomic writes by
|
|
||||||
// writing temporary files to that location before being moved
|
|
||||||
// to BasePath.
|
|
||||||
// Note that TempDir MUST be on the same device/partition as
|
|
||||||
// BasePath.
|
|
||||||
TempDir string
|
|
||||||
|
|
||||||
Index Index
|
|
||||||
IndexLess LessFunction
|
|
||||||
|
|
||||||
Compression Compression
|
|
||||||
}
|
|
||||||
|
|
||||||
// Diskv implements the Diskv interface. You shouldn't construct Diskv
|
|
||||||
// structures directly; instead, use the New constructor.
|
|
||||||
type Diskv struct {
|
|
||||||
Options
|
|
||||||
mu sync.RWMutex
|
|
||||||
cache map[string][]byte
|
|
||||||
cacheSize uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// New returns an initialized Diskv structure, ready to use.
|
|
||||||
// If the path identified by baseDir already contains data,
|
|
||||||
// it will be accessible, but not yet cached.
|
|
||||||
func New(o Options) *Diskv {
|
|
||||||
if o.BasePath == "" {
|
|
||||||
o.BasePath = defaultBasePath
|
|
||||||
}
|
|
||||||
if o.Transform == nil {
|
|
||||||
o.Transform = defaultTransform
|
|
||||||
}
|
|
||||||
if o.PathPerm == 0 {
|
|
||||||
o.PathPerm = defaultPathPerm
|
|
||||||
}
|
|
||||||
if o.FilePerm == 0 {
|
|
||||||
o.FilePerm = defaultFilePerm
|
|
||||||
}
|
|
||||||
|
|
||||||
d := &Diskv{
|
|
||||||
Options: o,
|
|
||||||
cache: map[string][]byte{},
|
|
||||||
cacheSize: 0,
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.Index != nil && d.IndexLess != nil {
|
|
||||||
d.Index.Initialize(d.IndexLess, d.Keys(nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write synchronously writes the key-value pair to disk, making it immediately
|
|
||||||
// available for reads. Write relies on the filesystem to perform an eventual
|
|
||||||
// sync to physical media. If you need stronger guarantees, see WriteStream.
|
|
||||||
func (d *Diskv) Write(key string, val []byte) error {
|
|
||||||
return d.WriteStream(key, bytes.NewBuffer(val), false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteStream writes the data represented by the io.Reader to the disk, under
|
|
||||||
// the provided key. If sync is true, WriteStream performs an explicit sync on
|
|
||||||
// the file as soon as it's written.
|
|
||||||
//
|
|
||||||
// bytes.Buffer provides io.Reader semantics for basic data types.
|
|
||||||
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
|
|
||||||
if len(key) <= 0 {
|
|
||||||
return errEmptyKey
|
|
||||||
}
|
|
||||||
|
|
||||||
d.mu.Lock()
|
|
||||||
defer d.mu.Unlock()
|
|
||||||
|
|
||||||
return d.writeStreamWithLock(key, r, sync)
|
|
||||||
}
|
|
||||||
|
|
||||||
// createKeyFileWithLock either creates the key file directly, or
|
|
||||||
// creates a temporary file in TempDir if it is set.
|
|
||||||
func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
|
|
||||||
if d.TempDir != "" {
|
|
||||||
if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
|
|
||||||
return nil, fmt.Errorf("temp mkdir: %s", err)
|
|
||||||
}
|
|
||||||
f, err := ioutil.TempFile(d.TempDir, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("temp file: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := f.Chmod(d.FilePerm); err != nil {
|
|
||||||
f.Close() // error deliberately ignored
|
|
||||||
os.Remove(f.Name()) // error deliberately ignored
|
|
||||||
return nil, fmt.Errorf("chmod: %s", err)
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
|
|
||||||
f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("open file: %s", err)
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeStream does no input validation checking.
|
|
||||||
func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
|
|
||||||
if err := d.ensurePathWithLock(key); err != nil {
|
|
||||||
return fmt.Errorf("ensure path: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := d.createKeyFileWithLock(key)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("create key file: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
wc := io.WriteCloser(&nopWriteCloser{f})
|
|
||||||
if d.Compression != nil {
|
|
||||||
wc, err = d.Compression.Writer(f)
|
|
||||||
if err != nil {
|
|
||||||
f.Close() // error deliberately ignored
|
|
||||||
os.Remove(f.Name()) // error deliberately ignored
|
|
||||||
return fmt.Errorf("compression writer: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := io.Copy(wc, r); err != nil {
|
|
||||||
f.Close() // error deliberately ignored
|
|
||||||
os.Remove(f.Name()) // error deliberately ignored
|
|
||||||
return fmt.Errorf("i/o copy: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := wc.Close(); err != nil {
|
|
||||||
f.Close() // error deliberately ignored
|
|
||||||
os.Remove(f.Name()) // error deliberately ignored
|
|
||||||
return fmt.Errorf("compression close: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if sync {
|
|
||||||
if err := f.Sync(); err != nil {
|
|
||||||
f.Close() // error deliberately ignored
|
|
||||||
os.Remove(f.Name()) // error deliberately ignored
|
|
||||||
return fmt.Errorf("file sync: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := f.Close(); err != nil {
|
|
||||||
return fmt.Errorf("file close: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.Name() != d.completeFilename(key) {
|
|
||||||
if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
|
|
||||||
os.Remove(f.Name()) // error deliberately ignored
|
|
||||||
return fmt.Errorf("rename: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.Index != nil {
|
|
||||||
d.Index.Insert(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.bustCacheWithLock(key) // cache only on read
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Import imports the source file into diskv under the destination key. If the
|
|
||||||
// destination key already exists, it's overwritten. If move is true, the
|
|
||||||
// source file is removed after a successful import.
|
|
||||||
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
|
|
||||||
if dstKey == "" {
|
|
||||||
return errEmptyKey
|
|
||||||
}
|
|
||||||
|
|
||||||
if fi, err := os.Stat(srcFilename); err != nil {
|
|
||||||
return err
|
|
||||||
} else if fi.IsDir() {
|
|
||||||
return errImportDirectory
|
|
||||||
}
|
|
||||||
|
|
||||||
d.mu.Lock()
|
|
||||||
defer d.mu.Unlock()
|
|
||||||
|
|
||||||
if err := d.ensurePathWithLock(dstKey); err != nil {
|
|
||||||
return fmt.Errorf("ensure path: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if move {
|
|
||||||
if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
|
|
||||||
d.bustCacheWithLock(dstKey)
|
|
||||||
return nil
|
|
||||||
} else if err != syscall.EXDEV {
|
|
||||||
// If it failed due to being on a different device, fall back to copying
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.Open(srcFilename)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
err = d.writeStreamWithLock(dstKey, f, false)
|
|
||||||
if err == nil && move {
|
|
||||||
err = os.Remove(srcFilename)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads the key and returns the value.
|
|
||||||
// If the key is available in the cache, Read won't touch the disk.
|
|
||||||
// If the key is not in the cache, Read will have the side-effect of
|
|
||||||
// lazily caching the value.
|
|
||||||
func (d *Diskv) Read(key string) ([]byte, error) {
|
|
||||||
rc, err := d.ReadStream(key, false)
|
|
||||||
if err != nil {
|
|
||||||
return []byte{}, err
|
|
||||||
}
|
|
||||||
defer rc.Close()
|
|
||||||
return ioutil.ReadAll(rc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
|
|
||||||
// If the value is cached from a previous read, and direct is false,
|
|
||||||
// ReadStream will use the cached value. Otherwise, it will return a handle to
|
|
||||||
// the file on disk, and cache the data on read.
|
|
||||||
//
|
|
||||||
// If direct is true, ReadStream will lazily delete any cached value for the
|
|
||||||
// key, and return a direct handle to the file on disk.
|
|
||||||
//
|
|
||||||
// If compression is enabled, ReadStream taps into the io.Reader stream prior
|
|
||||||
// to decompression, and caches the compressed data.
|
|
||||||
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
|
|
||||||
d.mu.RLock()
|
|
||||||
defer d.mu.RUnlock()
|
|
||||||
|
|
||||||
if val, ok := d.cache[key]; ok {
|
|
||||||
if !direct {
|
|
||||||
buf := bytes.NewBuffer(val)
|
|
||||||
if d.Compression != nil {
|
|
||||||
return d.Compression.Reader(buf)
|
|
||||||
}
|
|
||||||
return ioutil.NopCloser(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
d.mu.Lock()
|
|
||||||
defer d.mu.Unlock()
|
|
||||||
d.uncacheWithLock(key, uint64(len(val)))
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
return d.readWithRLock(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// read ignores the cache, and returns an io.ReadCloser representing the
|
|
||||||
// decompressed data for the given key, streamed from the disk. Clients should
|
|
||||||
// acquire a read lock on the Diskv and check the cache themselves before
|
|
||||||
// calling read.
|
|
||||||
func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
|
|
||||||
filename := d.completeFilename(key)
|
|
||||||
|
|
||||||
fi, err := os.Stat(filename)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if fi.IsDir() {
|
|
||||||
return nil, os.ErrNotExist
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.Open(filename)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var r io.Reader
|
|
||||||
if d.CacheSizeMax > 0 {
|
|
||||||
r = newSiphon(f, d, key)
|
|
||||||
} else {
|
|
||||||
r = &closingReader{f}
|
|
||||||
}
|
|
||||||
|
|
||||||
var rc = io.ReadCloser(ioutil.NopCloser(r))
|
|
||||||
if d.Compression != nil {
|
|
||||||
rc, err = d.Compression.Reader(r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return rc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// closingReader provides a Reader that automatically closes the
|
|
||||||
// embedded ReadCloser when it reaches EOF
|
|
||||||
type closingReader struct {
|
|
||||||
rc io.ReadCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cr closingReader) Read(p []byte) (int, error) {
|
|
||||||
n, err := cr.rc.Read(p)
|
|
||||||
if err == io.EOF {
|
|
||||||
if closeErr := cr.rc.Close(); closeErr != nil {
|
|
||||||
return n, closeErr // close must succeed for Read to succeed
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// siphon is like a TeeReader: it copies all data read through it to an
|
|
||||||
// internal buffer, and moves that buffer to the cache at EOF.
|
|
||||||
type siphon struct {
|
|
||||||
f *os.File
|
|
||||||
d *Diskv
|
|
||||||
key string
|
|
||||||
buf *bytes.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
// newSiphon constructs a siphoning reader that represents the passed file.
|
|
||||||
// When a successful series of reads ends in an EOF, the siphon will write
|
|
||||||
// the buffered data to Diskv's cache under the given key.
|
|
||||||
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
|
|
||||||
return &siphon{
|
|
||||||
f: f,
|
|
||||||
d: d,
|
|
||||||
key: key,
|
|
||||||
buf: &bytes.Buffer{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read implements the io.Reader interface for siphon.
|
|
||||||
func (s *siphon) Read(p []byte) (int, error) {
|
|
||||||
n, err := s.f.Read(p)
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
|
|
||||||
}
|
|
||||||
|
|
||||||
if err == io.EOF {
|
|
||||||
s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
|
|
||||||
if closeErr := s.f.Close(); closeErr != nil {
|
|
||||||
return n, closeErr // close must succeed for Read to succeed
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Erase synchronously erases the given key from the disk and the cache.
|
|
||||||
func (d *Diskv) Erase(key string) error {
|
|
||||||
d.mu.Lock()
|
|
||||||
defer d.mu.Unlock()
|
|
||||||
|
|
||||||
d.bustCacheWithLock(key)
|
|
||||||
|
|
||||||
// erase from index
|
|
||||||
if d.Index != nil {
|
|
||||||
d.Index.Delete(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// erase from disk
|
|
||||||
filename := d.completeFilename(key)
|
|
||||||
if s, err := os.Stat(filename); err == nil {
|
|
||||||
if s.IsDir() {
|
|
||||||
return errBadKey
|
|
||||||
}
|
|
||||||
if err = os.Remove(filename); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Return err as-is so caller can do os.IsNotExist(err).
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// clean up and return
|
|
||||||
d.pruneDirsWithLock(key)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EraseAll will delete all of the data from the store, both in the cache and on
|
|
||||||
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
|
|
||||||
// diskv-related data. Care should be taken to always specify a diskv base
|
|
||||||
// directory that is exclusively for diskv data.
|
|
||||||
func (d *Diskv) EraseAll() error {
|
|
||||||
d.mu.Lock()
|
|
||||||
defer d.mu.Unlock()
|
|
||||||
d.cache = make(map[string][]byte)
|
|
||||||
d.cacheSize = 0
|
|
||||||
if d.TempDir != "" {
|
|
||||||
os.RemoveAll(d.TempDir) // errors ignored
|
|
||||||
}
|
|
||||||
return os.RemoveAll(d.BasePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Has returns true if the given key exists.
|
|
||||||
func (d *Diskv) Has(key string) bool {
|
|
||||||
d.mu.Lock()
|
|
||||||
defer d.mu.Unlock()
|
|
||||||
|
|
||||||
if _, ok := d.cache[key]; ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
filename := d.completeFilename(key)
|
|
||||||
s, err := os.Stat(filename)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if s.IsDir() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel. Equivalent to KeysPrefix with an
// empty prefix.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
	return d.KeysPrefix("", cancel)
}
|
|
||||||
|
|
||||||
// KeysPrefix returns a channel that will yield every key accessible by the
|
|
||||||
// store with the given prefix, in undefined order. If a cancel channel is
|
|
||||||
// provided, closing it will terminate and close the keys channel. If the
|
|
||||||
// provided prefix is the empty string, all keys will be yielded.
|
|
||||||
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
|
|
||||||
var prepath string
|
|
||||||
if prefix == "" {
|
|
||||||
prepath = d.BasePath
|
|
||||||
} else {
|
|
||||||
prepath = d.pathFor(prefix)
|
|
||||||
}
|
|
||||||
c := make(chan string)
|
|
||||||
go func() {
|
|
||||||
filepath.Walk(prepath, walker(c, prefix, cancel))
|
|
||||||
close(c)
|
|
||||||
}()
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// walker returns a function which satisfies the filepath.WalkFunc interface.
|
|
||||||
// It sends every non-directory file entry down the channel c.
|
|
||||||
func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
|
|
||||||
return func(path string, info os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
|
|
||||||
return nil // "pass"
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case c <- info.Name():
|
|
||||||
case <-cancel:
|
|
||||||
return errCanceled
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// pathFor returns the absolute path for location on the filesystem where the
// data for the given key will be stored: BasePath joined with the key's
// Transform components.
func (d *Diskv) pathFor(key string) string {
	return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
}
|
|
||||||
|
|
||||||
// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key, with PathPerm permissions.
// Per the WithLock naming convention, callers are expected to hold d.mu.
func (d *Diskv) ensurePathWithLock(key string) error {
	return os.MkdirAll(d.pathFor(key), d.PathPerm)
}
|
|
||||||
|
|
||||||
// completeFilename returns the absolute path to the file for the given key:
// the key's transformed directory with the key itself as the filename.
func (d *Diskv) completeFilename(key string) string {
	return filepath.Join(d.pathFor(key), key)
}
|
|
||||||
|
|
||||||
// cacheWithLock attempts to cache the given key-value pair in the store's
|
|
||||||
// cache. It can fail if the value is larger than the cache's maximum size.
|
|
||||||
func (d *Diskv) cacheWithLock(key string, val []byte) error {
|
|
||||||
valueSize := uint64(len(val))
|
|
||||||
if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
|
|
||||||
return fmt.Errorf("%s; not caching", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// be very strict about memory guarantees
|
|
||||||
if (d.cacheSize + valueSize) > d.CacheSizeMax {
|
|
||||||
panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
|
|
||||||
}
|
|
||||||
|
|
||||||
d.cache[key] = val
|
|
||||||
d.cacheSize += valueSize
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
// Used by siphon.Read, which runs outside any lock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.cacheWithLock(key, val)
}
|
|
||||||
|
|
||||||
// bustCacheWithLock evicts the key from the in-memory cache, if present,
// crediting its size back to the cache budget. Called with d.mu held
// (see Erase).
func (d *Diskv) bustCacheWithLock(key string) {
	if val, ok := d.cache[key]; ok {
		d.uncacheWithLock(key, uint64(len(val)))
	}
}
|
|
||||||
|
|
||||||
// uncacheWithLock removes the key from the cache and subtracts sz bytes from
// the tracked cache size. Per the WithLock naming convention, callers are
// expected to hold d.mu.
func (d *Diskv) uncacheWithLock(key string, sz uint64) {
	d.cacheSize -= sz
	delete(d.cache, key)
}
|
|
||||||
|
|
||||||
// pruneDirsWithLock deletes empty directories in the path walk leading to the
|
|
||||||
// key k. Typically this function is called after an Erase is made.
|
|
||||||
func (d *Diskv) pruneDirsWithLock(key string) error {
|
|
||||||
pathlist := d.Transform(key)
|
|
||||||
for i := range pathlist {
|
|
||||||
dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
|
|
||||||
|
|
||||||
// thanks to Steven Blenkinsop for this snippet
|
|
||||||
switch fi, err := os.Stat(dir); true {
|
|
||||||
case err != nil:
|
|
||||||
return err
|
|
||||||
case !fi.IsDir():
|
|
||||||
panic(fmt.Sprintf("corrupt dirstate at %s", dir))
|
|
||||||
}
|
|
||||||
|
|
||||||
nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if len(nlinks) > 0 {
|
|
||||||
return nil // has subdirs -- do not prune
|
|
||||||
}
|
|
||||||
if err = os.Remove(dir); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
|
|
||||||
// until the cache has at least valueSize bytes available.
|
|
||||||
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
|
|
||||||
if valueSize > d.CacheSizeMax {
|
|
||||||
return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
|
|
||||||
}
|
|
||||||
|
|
||||||
safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
|
|
||||||
|
|
||||||
for key, val := range d.cache {
|
|
||||||
if safe() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
d.uncacheWithLock(key, uint64(len(val)))
|
|
||||||
}
|
|
||||||
|
|
||||||
if !safe() {
|
|
||||||
panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
|
|
||||||
// satisfy the io.WriteCloser interface.
|
|
||||||
type nopWriteCloser struct {
|
|
||||||
io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
|
|
||||||
func (wc *nopWriteCloser) Close() error { return nil }
|
|
||||||
115
vendor/github.com/peterbourgon/diskv/index.go
generated
vendored
115
vendor/github.com/peterbourgon/diskv/index.go
generated
vendored
@@ -1,115 +0,0 @@
|
|||||||
package diskv
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/google/btree"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Index is a generic interface for things that can
// provide an ordered list of keys.
type Index interface {
	// Initialize rebuilds the index from the keys channel, ordered by less.
	Initialize(less LessFunction, keys <-chan string)
	// Insert adds key to the index.
	Insert(key string)
	// Delete removes key from the index.
	Delete(key string)
	// Keys returns up to n keys in order, starting after from.
	Keys(from string, n int) []string
}

// LessFunction is used to initialize an Index of keys in a specific order.
type LessFunction func(string, string) bool
|
|
||||||
|
|
||||||
// btreeString is a custom data type that satisfies the BTree Less interface,
// making the strings it wraps sortable by the BTree package.
type btreeString struct {
	s string       // the wrapped key
	l LessFunction // ordering used for comparisons
}

// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
func (s btreeString) Less(i btree.Item) bool {
	return s.l(s.s, i.(btreeString).s)
}
|
|
||||||
|
|
||||||
// BTreeIndex is an implementation of the Index interface using google/btree.
// The embedded RWMutex guards the embedded tree and LessFunction.
type BTreeIndex struct {
	sync.RWMutex
	LessFunction
	*btree.BTree
}
|
|
||||||
|
|
||||||
// Initialize populates the BTree tree with data from the keys channel,
// according to the passed less function. It's destructive to the BTreeIndex:
// any existing tree is replaced wholesale.
func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
	i.Lock()
	defer i.Unlock()
	i.LessFunction = less
	i.BTree = rebuild(less, keys)
}
|
|
||||||
|
|
||||||
// Insert inserts the given key (only) into the BTree tree. Panics if the
// index has not been Initialized first.
func (i *BTreeIndex) Insert(key string) {
	i.Lock()
	defer i.Unlock()
	if i.BTree == nil || i.LessFunction == nil {
		panic("uninitialized index")
	}
	i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
}
|
|
||||||
|
|
||||||
// Delete removes the given key (only) from the BTree tree. Panics if the
// index has not been Initialized first.
func (i *BTreeIndex) Delete(key string) {
	i.Lock()
	defer i.Unlock()
	if i.BTree == nil || i.LessFunction == nil {
		panic("uninitialized index")
	}
	i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
}
|
|
||||||
|
|
||||||
// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
|
|
||||||
// Keys will return the first n keys. If the passed 'from' key is non-empty, the
|
|
||||||
// first key in the returned slice will be the key that immediately follows the
|
|
||||||
// passed key, in key order.
|
|
||||||
func (i *BTreeIndex) Keys(from string, n int) []string {
|
|
||||||
i.RLock()
|
|
||||||
defer i.RUnlock()
|
|
||||||
|
|
||||||
if i.BTree == nil || i.LessFunction == nil {
|
|
||||||
panic("uninitialized index")
|
|
||||||
}
|
|
||||||
|
|
||||||
if i.BTree.Len() <= 0 {
|
|
||||||
return []string{}
|
|
||||||
}
|
|
||||||
|
|
||||||
btreeFrom := btreeString{s: from, l: i.LessFunction}
|
|
||||||
skipFirst := true
|
|
||||||
if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
|
|
||||||
// no such key, so fabricate an always-smallest item
|
|
||||||
btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
|
|
||||||
skipFirst = false
|
|
||||||
}
|
|
||||||
|
|
||||||
keys := []string{}
|
|
||||||
iterator := func(i btree.Item) bool {
|
|
||||||
keys = append(keys, i.(btreeString).s)
|
|
||||||
return len(keys) < n
|
|
||||||
}
|
|
||||||
i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
|
|
||||||
|
|
||||||
if skipFirst && len(keys) > 0 {
|
|
||||||
keys = keys[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// rebuild does the work of regenerating the index with the given keys,
// draining the channel into a fresh degree-2 btree.
func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
	tree := btree.New(2)
	for key := range keys {
		tree.ReplaceOrInsert(btreeString{s: key, l: less})
	}
	return tree
}
|
|
||||||
712
vendor/golang.org/x/net/html/atom/gen.go
generated
vendored
Normal file
712
vendor/golang.org/x/net/html/atom/gen.go
generated
vendored
Normal file
@@ -0,0 +1,712 @@
|
|||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
//go:generate go run gen.go
|
||||||
|
//go:generate go run gen.go -test
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"go/format"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// identifier converts s to a Go exported identifier.
// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
func identifier(s string) string {
	out := make([]byte, 0, len(s))
	upper := true // capitalize the next lowercase letter
	for _, c := range s {
		switch {
		case c == '-':
			upper = true
			continue
		case upper && 'a' <= c && c <= 'z':
			c -= 'a' - 'A'
		}
		upper = false
		out = append(out, byte(c))
	}
	return string(out)
}
|
||||||
|
|
||||||
|
// test, when set, makes main generate table_test.go instead of table.go.
var test = flag.Bool("test", false, "generate table_test.go")
|
||||||
|
|
||||||
|
// genFile gofmt's the accumulated source in buf and writes it to name.
// Any formatting or write error is reported to stderr and terminates the
// generator with a non-zero exit status.
func genFile(name string, buf *bytes.Buffer) {
	src, err := format.Source(buf.Bytes())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if werr := ioutil.WriteFile(name, src, 0644); werr != nil {
		fmt.Fprintln(os.Stderr, werr)
		os.Exit(1)
	}
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
var all []string
|
||||||
|
all = append(all, elements...)
|
||||||
|
all = append(all, attributes...)
|
||||||
|
all = append(all, eventHandlers...)
|
||||||
|
all = append(all, extra...)
|
||||||
|
sort.Strings(all)
|
||||||
|
|
||||||
|
// uniq - lists have dups
|
||||||
|
w := 0
|
||||||
|
for _, s := range all {
|
||||||
|
if w == 0 || all[w-1] != s {
|
||||||
|
all[w] = s
|
||||||
|
w++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
all = all[:w]
|
||||||
|
|
||||||
|
if *test {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
|
||||||
|
fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n")
|
||||||
|
fmt.Fprintln(&buf, "package atom\n")
|
||||||
|
fmt.Fprintln(&buf, "var testAtomList = []string{")
|
||||||
|
for _, s := range all {
|
||||||
|
fmt.Fprintf(&buf, "\t%q,\n", s)
|
||||||
|
}
|
||||||
|
fmt.Fprintln(&buf, "}")
|
||||||
|
|
||||||
|
genFile("table_test.go", &buf)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find hash that minimizes table size.
|
||||||
|
var best *table
|
||||||
|
for i := 0; i < 1000000; i++ {
|
||||||
|
if best != nil && 1<<(best.k-1) < len(all) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
h := rand.Uint32()
|
||||||
|
for k := uint(0); k <= 16; k++ {
|
||||||
|
if best != nil && k >= best.k {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
var t table
|
||||||
|
if t.init(h, k, all) {
|
||||||
|
best = &t
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if best == nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "failed to construct string table\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lay out strings, using overlaps when possible.
|
||||||
|
layout := append([]string{}, all...)
|
||||||
|
|
||||||
|
// Remove strings that are substrings of other strings
|
||||||
|
for changed := true; changed; {
|
||||||
|
changed = false
|
||||||
|
for i, s := range layout {
|
||||||
|
if s == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for j, t := range layout {
|
||||||
|
if i != j && t != "" && strings.Contains(s, t) {
|
||||||
|
changed = true
|
||||||
|
layout[j] = ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Join strings where one suffix matches another prefix.
|
||||||
|
for {
|
||||||
|
// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
|
||||||
|
// maximizing overlap length k.
|
||||||
|
besti := -1
|
||||||
|
bestj := -1
|
||||||
|
bestk := 0
|
||||||
|
for i, s := range layout {
|
||||||
|
if s == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for j, t := range layout {
|
||||||
|
if i == j {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
|
||||||
|
if s[len(s)-k:] == t[:k] {
|
||||||
|
besti = i
|
||||||
|
bestj = j
|
||||||
|
bestk = k
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if bestk > 0 {
|
||||||
|
layout[besti] += layout[bestj][bestk:]
|
||||||
|
layout[bestj] = ""
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
text := strings.Join(layout, "")
|
||||||
|
|
||||||
|
atom := map[string]uint32{}
|
||||||
|
for _, s := range all {
|
||||||
|
off := strings.Index(text, s)
|
||||||
|
if off < 0 {
|
||||||
|
panic("lost string " + s)
|
||||||
|
}
|
||||||
|
atom[s] = uint32(off<<8 | len(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
// Generate the Go code.
|
||||||
|
fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
|
||||||
|
fmt.Fprintln(&buf, "//go:generate go run gen.go\n")
|
||||||
|
fmt.Fprintln(&buf, "package atom\n\nconst (")
|
||||||
|
|
||||||
|
// compute max len
|
||||||
|
maxLen := 0
|
||||||
|
for _, s := range all {
|
||||||
|
if maxLen < len(s) {
|
||||||
|
maxLen = len(s)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s])
|
||||||
|
}
|
||||||
|
fmt.Fprintln(&buf, ")\n")
|
||||||
|
|
||||||
|
fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0)
|
||||||
|
fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen)
|
||||||
|
|
||||||
|
fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k)
|
||||||
|
for i, s := range best.tab {
|
||||||
|
if s == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&buf, "}\n")
|
||||||
|
datasize := (1 << best.k) * 4
|
||||||
|
|
||||||
|
fmt.Fprintln(&buf, "const atomText =")
|
||||||
|
textsize := len(text)
|
||||||
|
for len(text) > 60 {
|
||||||
|
fmt.Fprintf(&buf, "\t%q +\n", text[:60])
|
||||||
|
text = text[60:]
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&buf, "\t%q\n\n", text)
|
||||||
|
|
||||||
|
genFile("table.go", &buf)
|
||||||
|
|
||||||
|
fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
|
||||||
|
}
|
||||||
|
|
||||||
|
type byLen []string
|
||||||
|
|
||||||
|
func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
|
||||||
|
func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||||
|
func (x byLen) Len() int { return len(x) }
|
||||||
|
|
||||||
|
// fnv computes the FNV-1 hash of s with an arbitrary starting value h.
func fnv(h uint32, s string) uint32 {
	for _, c := range []byte(s) {
		h = (h ^ uint32(c)) * 16777619
	}
	return h
}

// A table represents an attempt at constructing the lookup table.
// The lookup table uses cuckoo hashing, meaning that each string
// can be found in one of two positions.
type table struct {
	h0   uint32
	k    uint
	mask uint32
	tab  []string
}

// hash returns the two candidate positions for s.
func (t *table) hash(s string) (h1, h2 uint32) {
	h := fnv(t.h0, s)
	return h & t.mask, (h >> 16) & t.mask
}

// init initializes the table with the given parameters.
// h0 is the initial hash value,
// k is the number of bits of hash value to use, and
// x is the list of strings to store in the table.
// init returns false if the table cannot be constructed.
func (t *table) init(h0 uint32, k uint, x []string) bool {
	t.h0 = h0
	t.k = k
	t.tab = make([]string, 1<<k)
	t.mask = 1<<k - 1
	for _, s := range x {
		if !t.insert(s) {
			return false
		}
	}
	return true
}

// insert inserts s in the table, displacing existing entries if needed.
func (t *table) insert(s string) bool {
	h1, h2 := t.hash(s)
	// Take a free slot if either candidate position is open.
	for _, h := range [2]uint32{h1, h2} {
		if t.tab[h] == "" {
			t.tab[h] = s
			return true
		}
	}
	// Otherwise try to push aside one of the occupants.
	for _, h := range [2]uint32{h1, h2} {
		if t.push(h, 0) {
			t.tab[h] = s
			return true
		}
	}
	return false
}

// push attempts to push aside the entry in slot i, moving it to its
// alternate position (recursively displacing as needed, bounded by depth).
func (t *table) push(i uint32, depth int) bool {
	if depth > len(t.tab) {
		return false
	}
	s := t.tab[i]
	h1, h2 := t.hash(s)
	j := h1 + h2 - i // the other candidate slot for s
	if t.tab[j] != "" && !t.push(j, depth+1) {
		return false
	}
	t.tab[j] = s
	return true
}
|
||||||
|
|
||||||
|
// The lists of element names and attribute keys were taken from
// https://html.spec.whatwg.org/multipage/indices.html#index
// as of the "HTML Living Standard - Last Updated 16 April 2018" version.

// "command", "keygen" and "menuitem" have been removed from the spec,
// but are kept here for backwards compatibility.
var elements = []string{
	"a", "abbr", "address", "area", "article", "aside", "audio", "b",
	"base", "bdi", "bdo", "blockquote", "body", "br", "button", "canvas",
	"caption", "cite", "code", "col", "colgroup", "command", "data", "datalist",
	"dd", "del", "details", "dfn", "dialog", "div", "dl", "dt",
	"em", "embed", "fieldset", "figcaption", "figure", "footer", "form", "h1",
	"h2", "h3", "h4", "h5", "h6", "head", "header", "hgroup",
	"hr", "html", "i", "iframe", "img", "input", "ins", "kbd",
	"keygen", "label", "legend", "li", "link", "main", "map", "mark",
	"menu", "menuitem", "meta", "meter", "nav", "noscript", "object", "ol",
	"optgroup", "option", "output", "p", "param", "picture", "pre", "progress",
	"q", "rp", "rt", "ruby", "s", "samp", "script", "section",
	"select", "slot", "small", "source", "span", "strong", "style", "sub",
	"summary", "sup", "table", "tbody", "td", "template", "textarea", "tfoot",
	"th", "thead", "time", "title", "tr", "track", "u", "ul",
	"var", "video", "wbr",
}
|
||||||
|
|
||||||
|
// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
//
// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup",
// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec,
// but are kept here for backwards compatibility.
//
// NOTE(review): "spellcheck" appears twice in this list (once out of order,
// once in order); the duplicate is harmless because main() dedupes, and it is
// preserved here verbatim.
var attributes = []string{
	"abbr", "accept", "accept-charset", "accesskey", "action", "allowfullscreen",
	"allowpaymentrequest", "allowusermedia", "alt", "as", "async", "autocomplete",
	"autofocus", "autoplay", "challenge", "charset", "checked", "cite",
	"class", "color", "cols", "colspan", "command", "content",
	"contenteditable", "contextmenu", "controls", "coords", "crossorigin", "data",
	"datetime", "default", "defer", "dir", "dirname", "disabled",
	"download", "draggable", "dropzone", "enctype", "for", "form",
	"formaction", "formenctype", "formmethod", "formnovalidate", "formtarget", "headers",
	"height", "hidden", "high", "href", "hreflang", "http-equiv",
	"icon", "id", "inputmode", "integrity", "is", "ismap",
	"itemid", "itemprop", "itemref", "itemscope", "itemtype", "keytype",
	"kind", "label", "lang", "list", "loop", "low",
	"manifest", "max", "maxlength", "media", "mediagroup", "method",
	"min", "minlength", "multiple", "muted", "name", "nomodule",
	"nonce", "novalidate", "open", "optimum", "pattern", "ping",
	"placeholder", "playsinline", "poster", "preload", "radiogroup", "readonly",
	"referrerpolicy", "rel", "required", "reversed", "rows", "rowspan",
	"sandbox", "spellcheck", "scope", "scoped", "seamless", "selected",
	"shape", "size", "sizes", "sortable", "sorted", "slot",
	"span", "spellcheck", "src", "srcdoc", "srclang", "srcset",
	"start", "step", "style", "tabindex", "target", "title",
	"translate", "type", "typemustmatch", "updateviacache", "usemap", "value",
	"width", "workertype", "wrap",
}
|
||||||
|
|
||||||
|
// "onautocomplete", "onautocompleteerror", "onmousewheel",
|
||||||
|
// "onshow" and "onsort" have been removed from the spec,
|
||||||
|
// but are kept here for backwards compatibility.
|
||||||
|
var eventHandlers = []string{
|
||||||
|
"onabort",
|
||||||
|
"onautocomplete",
|
||||||
|
"onautocompleteerror",
|
||||||
|
"onauxclick",
|
||||||
|
"onafterprint",
|
||||||
|
"onbeforeprint",
|
||||||
|
"onbeforeunload",
|
||||||
|
"onblur",
|
||||||
|
"oncancel",
|
||||||
|
"oncanplay",
|
||||||
|
"oncanplaythrough",
|
||||||
|
"onchange",
|
||||||
|
"onclick",
|
||||||
|
"onclose",
|
||||||
|
"oncontextmenu",
|
||||||
|
"oncopy",
|
||||||
|
"oncuechange",
|
||||||
|
"oncut",
|
||||||
|
"ondblclick",
|
||||||
|
"ondrag",
|
||||||
|
"ondragend",
|
||||||
|
"ondragenter",
|
||||||
|
"ondragexit",
|
||||||
|
"ondragleave",
|
||||||
|
"ondragover",
|
||||||
|
"ondragstart",
|
||||||
|
"ondrop",
|
||||||
|
"ondurationchange",
|
||||||
|
"onemptied",
|
||||||
|
"onended",
|
||||||
|
"onerror",
|
||||||
|
"onfocus",
|
||||||
|
"onhashchange",
|
||||||
|
"oninput",
|
||||||
|
"oninvalid",
|
||||||
|
"onkeydown",
|
||||||
|
"onkeypress",
|
||||||
|
"onkeyup",
|
||||||
|
"onlanguagechange",
|
||||||
|
"onload",
|
||||||
|
"onloadeddata",
|
||||||
|
"onloadedmetadata",
|
||||||
|
"onloadend",
|
||||||
|
"onloadstart",
|
||||||
|
"onmessage",
|
||||||
|
"onmessageerror",
|
||||||
|
"onmousedown",
|
||||||
|
"onmouseenter",
|
||||||
|
"onmouseleave",
|
||||||
|
"onmousemove",
|
||||||
|
"onmouseout",
|
||||||
|
"onmouseover",
|
||||||
|
"onmouseup",
|
||||||
|
"onmousewheel",
|
||||||
|
"onwheel",
|
||||||
|
"onoffline",
|
||||||
|
"ononline",
|
||||||
|
"onpagehide",
|
||||||
|
"onpageshow",
|
||||||
|
"onpaste",
|
||||||
|
"onpause",
|
||||||
|
"onplay",
|
||||||
|
"onplaying",
|
||||||
|
"onpopstate",
|
||||||
|
"onprogress",
|
||||||
|
"onratechange",
|
||||||
|
"onreset",
|
||||||
|
"onresize",
|
||||||
|
"onrejectionhandled",
|
||||||
|
"onscroll",
|
||||||
|
"onsecuritypolicyviolation",
|
||||||
|
"onseeked",
|
||||||
|
"onseeking",
|
||||||
|
"onselect",
|
||||||
|
"onshow",
|
||||||
|
"onsort",
|
||||||
|
"onstalled",
|
||||||
|
"onstorage",
|
||||||
|
"onsubmit",
|
||||||
|
"onsuspend",
|
||||||
|
"ontimeupdate",
|
||||||
|
"ontoggle",
|
||||||
|
"onunhandledrejection",
|
||||||
|
"onunload",
|
||||||
|
"onvolumechange",
|
||||||
|
"onwaiting",
|
||||||
|
}
|
||||||
|
|
||||||
|
// extra are ad-hoc values not covered by any of the lists above.
|
||||||
|
var extra = []string{
|
||||||
|
"acronym",
|
||||||
|
"align",
|
||||||
|
"annotation",
|
||||||
|
"annotation-xml",
|
||||||
|
"applet",
|
||||||
|
"basefont",
|
||||||
|
"bgsound",
|
||||||
|
"big",
|
||||||
|
"blink",
|
||||||
|
"center",
|
||||||
|
"color",
|
||||||
|
"desc",
|
||||||
|
"face",
|
||||||
|
"font",
|
||||||
|
"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
|
||||||
|
"foreignobject",
|
||||||
|
"frame",
|
||||||
|
"frameset",
|
||||||
|
"image",
|
||||||
|
"isindex",
|
||||||
|
"listing",
|
||||||
|
"malignmark",
|
||||||
|
"marquee",
|
||||||
|
"math",
|
||||||
|
"mglyph",
|
||||||
|
"mi",
|
||||||
|
"mn",
|
||||||
|
"mo",
|
||||||
|
"ms",
|
||||||
|
"mtext",
|
||||||
|
"nobr",
|
||||||
|
"noembed",
|
||||||
|
"noframes",
|
||||||
|
"plaintext",
|
||||||
|
"prompt",
|
||||||
|
"public",
|
||||||
|
"rb",
|
||||||
|
"rtc",
|
||||||
|
"spacer",
|
||||||
|
"strike",
|
||||||
|
"svg",
|
||||||
|
"system",
|
||||||
|
"tt",
|
||||||
|
"xmp",
|
||||||
|
}
|
||||||
383
vendor/golang.org/x/net/internal/iana/gen.go
generated
vendored
Normal file
383
vendor/golang.org/x/net/internal/iana/gen.go
generated
vendored
Normal file
@@ -0,0 +1,383 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
//go:generate go run gen.go
|
||||||
|
|
||||||
|
// This program generates internet protocol constants and tables by
|
||||||
|
// reading IANA protocol registries.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"go/format"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var registries = []struct {
|
||||||
|
url string
|
||||||
|
parse func(io.Writer, io.Reader) error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"https://www.iana.org/assignments/dscp-registry/dscp-registry.xml",
|
||||||
|
parseDSCPRegistry,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml",
|
||||||
|
parseProtocolNumbers,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"https://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml",
|
||||||
|
parseAddrFamilyNumbers,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var bb bytes.Buffer
|
||||||
|
fmt.Fprintf(&bb, "// go generate gen.go\n")
|
||||||
|
fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n")
|
||||||
|
fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n")
|
||||||
|
fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n")
|
||||||
|
for _, r := range registries {
|
||||||
|
resp, err := http.Get(r.url)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if err := r.parse(&bb, resp.Body); err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&bb, "\n")
|
||||||
|
}
|
||||||
|
b, err := format.Source(bb.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if err := ioutil.WriteFile("const.go", b, 0644); err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseDSCPRegistry(w io.Writer, r io.Reader) error {
|
||||||
|
dec := xml.NewDecoder(r)
|
||||||
|
var dr dscpRegistry
|
||||||
|
if err := dec.Decode(&dr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated)
|
||||||
|
fmt.Fprintf(w, "const (\n")
|
||||||
|
for _, dr := range dr.escapeDSCP() {
|
||||||
|
fmt.Fprintf(w, "DiffServ%s = %#02x", dr.Name, dr.Value)
|
||||||
|
fmt.Fprintf(w, "// %s\n", dr.OrigName)
|
||||||
|
}
|
||||||
|
for _, er := range dr.escapeECN() {
|
||||||
|
fmt.Fprintf(w, "%s = %#02x", er.Descr, er.Value)
|
||||||
|
fmt.Fprintf(w, "// %s\n", er.OrigDescr)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, ")\n")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type dscpRegistry struct {
|
||||||
|
XMLName xml.Name `xml:"registry"`
|
||||||
|
Title string `xml:"title"`
|
||||||
|
Updated string `xml:"updated"`
|
||||||
|
Note string `xml:"note"`
|
||||||
|
Registries []struct {
|
||||||
|
Title string `xml:"title"`
|
||||||
|
Registries []struct {
|
||||||
|
Title string `xml:"title"`
|
||||||
|
Records []struct {
|
||||||
|
Name string `xml:"name"`
|
||||||
|
Space string `xml:"space"`
|
||||||
|
} `xml:"record"`
|
||||||
|
} `xml:"registry"`
|
||||||
|
Records []struct {
|
||||||
|
Value string `xml:"value"`
|
||||||
|
Descr string `xml:"description"`
|
||||||
|
} `xml:"record"`
|
||||||
|
} `xml:"registry"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type canonDSCPRecord struct {
|
||||||
|
OrigName string
|
||||||
|
Name string
|
||||||
|
Value int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (drr *dscpRegistry) escapeDSCP() []canonDSCPRecord {
|
||||||
|
var drs []canonDSCPRecord
|
||||||
|
for _, preg := range drr.Registries {
|
||||||
|
if !strings.Contains(preg.Title, "Differentiated Services Field Codepoints") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, reg := range preg.Registries {
|
||||||
|
if !strings.Contains(reg.Title, "Pool 1 Codepoints") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
drs = make([]canonDSCPRecord, len(reg.Records))
|
||||||
|
sr := strings.NewReplacer(
|
||||||
|
"+", "",
|
||||||
|
"-", "",
|
||||||
|
"/", "",
|
||||||
|
".", "",
|
||||||
|
" ", "",
|
||||||
|
)
|
||||||
|
for i, dr := range reg.Records {
|
||||||
|
s := strings.TrimSpace(dr.Name)
|
||||||
|
drs[i].OrigName = s
|
||||||
|
drs[i].Name = sr.Replace(s)
|
||||||
|
n, err := strconv.ParseUint(dr.Space, 2, 8)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
drs[i].Value = int(n) << 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return drs
|
||||||
|
}
|
||||||
|
|
||||||
|
type canonECNRecord struct {
|
||||||
|
OrigDescr string
|
||||||
|
Descr string
|
||||||
|
Value int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (drr *dscpRegistry) escapeECN() []canonECNRecord {
|
||||||
|
var ers []canonECNRecord
|
||||||
|
for _, reg := range drr.Registries {
|
||||||
|
if !strings.Contains(reg.Title, "ECN Field") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ers = make([]canonECNRecord, len(reg.Records))
|
||||||
|
sr := strings.NewReplacer(
|
||||||
|
"Capable", "",
|
||||||
|
"Not-ECT", "",
|
||||||
|
"ECT(1)", "",
|
||||||
|
"ECT(0)", "",
|
||||||
|
"CE", "",
|
||||||
|
"(", "",
|
||||||
|
")", "",
|
||||||
|
"+", "",
|
||||||
|
"-", "",
|
||||||
|
"/", "",
|
||||||
|
".", "",
|
||||||
|
" ", "",
|
||||||
|
)
|
||||||
|
for i, er := range reg.Records {
|
||||||
|
s := strings.TrimSpace(er.Descr)
|
||||||
|
ers[i].OrigDescr = s
|
||||||
|
ss := strings.Split(s, " ")
|
||||||
|
if len(ss) > 1 {
|
||||||
|
ers[i].Descr = strings.Join(ss[1:], " ")
|
||||||
|
} else {
|
||||||
|
ers[i].Descr = ss[0]
|
||||||
|
}
|
||||||
|
ers[i].Descr = sr.Replace(er.Descr)
|
||||||
|
n, err := strconv.ParseUint(er.Value, 2, 8)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ers[i].Value = int(n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ers
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseProtocolNumbers(w io.Writer, r io.Reader) error {
|
||||||
|
dec := xml.NewDecoder(r)
|
||||||
|
var pn protocolNumbers
|
||||||
|
if err := dec.Decode(&pn); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
prs := pn.escape()
|
||||||
|
prs = append([]canonProtocolRecord{{
|
||||||
|
Name: "IP",
|
||||||
|
Descr: "IPv4 encapsulation, pseudo protocol number",
|
||||||
|
Value: 0,
|
||||||
|
}}, prs...)
|
||||||
|
fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated)
|
||||||
|
fmt.Fprintf(w, "const (\n")
|
||||||
|
for _, pr := range prs {
|
||||||
|
if pr.Name == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value)
|
||||||
|
s := pr.Descr
|
||||||
|
if s == "" {
|
||||||
|
s = pr.OrigName
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "// %s\n", s)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, ")\n")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type protocolNumbers struct {
|
||||||
|
XMLName xml.Name `xml:"registry"`
|
||||||
|
Title string `xml:"title"`
|
||||||
|
Updated string `xml:"updated"`
|
||||||
|
RegTitle string `xml:"registry>title"`
|
||||||
|
Note string `xml:"registry>note"`
|
||||||
|
Records []struct {
|
||||||
|
Value string `xml:"value"`
|
||||||
|
Name string `xml:"name"`
|
||||||
|
Descr string `xml:"description"`
|
||||||
|
} `xml:"registry>record"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type canonProtocolRecord struct {
|
||||||
|
OrigName string
|
||||||
|
Name string
|
||||||
|
Descr string
|
||||||
|
Value int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pn *protocolNumbers) escape() []canonProtocolRecord {
|
||||||
|
prs := make([]canonProtocolRecord, len(pn.Records))
|
||||||
|
sr := strings.NewReplacer(
|
||||||
|
"-in-", "in",
|
||||||
|
"-within-", "within",
|
||||||
|
"-over-", "over",
|
||||||
|
"+", "P",
|
||||||
|
"-", "",
|
||||||
|
"/", "",
|
||||||
|
".", "",
|
||||||
|
" ", "",
|
||||||
|
)
|
||||||
|
for i, pr := range pn.Records {
|
||||||
|
if strings.Contains(pr.Name, "Deprecated") ||
|
||||||
|
strings.Contains(pr.Name, "deprecated") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
prs[i].OrigName = pr.Name
|
||||||
|
s := strings.TrimSpace(pr.Name)
|
||||||
|
switch pr.Name {
|
||||||
|
case "ISIS over IPv4":
|
||||||
|
prs[i].Name = "ISIS"
|
||||||
|
case "manet":
|
||||||
|
prs[i].Name = "MANET"
|
||||||
|
default:
|
||||||
|
prs[i].Name = sr.Replace(s)
|
||||||
|
}
|
||||||
|
ss := strings.Split(pr.Descr, "\n")
|
||||||
|
for i := range ss {
|
||||||
|
ss[i] = strings.TrimSpace(ss[i])
|
||||||
|
}
|
||||||
|
if len(ss) > 1 {
|
||||||
|
prs[i].Descr = strings.Join(ss, " ")
|
||||||
|
} else {
|
||||||
|
prs[i].Descr = ss[0]
|
||||||
|
}
|
||||||
|
prs[i].Value, _ = strconv.Atoi(pr.Value)
|
||||||
|
}
|
||||||
|
return prs
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseAddrFamilyNumbers(w io.Writer, r io.Reader) error {
|
||||||
|
dec := xml.NewDecoder(r)
|
||||||
|
var afn addrFamilylNumbers
|
||||||
|
if err := dec.Decode(&afn); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
afrs := afn.escape()
|
||||||
|
fmt.Fprintf(w, "// %s, Updated: %s\n", afn.Title, afn.Updated)
|
||||||
|
fmt.Fprintf(w, "const (\n")
|
||||||
|
for _, afr := range afrs {
|
||||||
|
if afr.Name == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "AddrFamily%s = %d", afr.Name, afr.Value)
|
||||||
|
fmt.Fprintf(w, "// %s\n", afr.Descr)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, ")\n")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type addrFamilylNumbers struct {
|
||||||
|
XMLName xml.Name `xml:"registry"`
|
||||||
|
Title string `xml:"title"`
|
||||||
|
Updated string `xml:"updated"`
|
||||||
|
RegTitle string `xml:"registry>title"`
|
||||||
|
Note string `xml:"registry>note"`
|
||||||
|
Records []struct {
|
||||||
|
Value string `xml:"value"`
|
||||||
|
Descr string `xml:"description"`
|
||||||
|
} `xml:"registry>record"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type canonAddrFamilyRecord struct {
|
||||||
|
Name string
|
||||||
|
Descr string
|
||||||
|
Value int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (afn *addrFamilylNumbers) escape() []canonAddrFamilyRecord {
|
||||||
|
afrs := make([]canonAddrFamilyRecord, len(afn.Records))
|
||||||
|
sr := strings.NewReplacer(
|
||||||
|
"IP version 4", "IPv4",
|
||||||
|
"IP version 6", "IPv6",
|
||||||
|
"Identifier", "ID",
|
||||||
|
"-", "",
|
||||||
|
"-", "",
|
||||||
|
"/", "",
|
||||||
|
".", "",
|
||||||
|
" ", "",
|
||||||
|
)
|
||||||
|
for i, afr := range afn.Records {
|
||||||
|
if strings.Contains(afr.Descr, "Unassigned") ||
|
||||||
|
strings.Contains(afr.Descr, "Reserved") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
afrs[i].Descr = afr.Descr
|
||||||
|
s := strings.TrimSpace(afr.Descr)
|
||||||
|
switch s {
|
||||||
|
case "IP (IP version 4)":
|
||||||
|
afrs[i].Name = "IPv4"
|
||||||
|
case "IP6 (IP version 6)":
|
||||||
|
afrs[i].Name = "IPv6"
|
||||||
|
case "AFI for L2VPN information":
|
||||||
|
afrs[i].Name = "L2VPN"
|
||||||
|
case "E.164 with NSAP format subaddress":
|
||||||
|
afrs[i].Name = "E164withSubaddress"
|
||||||
|
case "MT IP: Multi-Topology IP version 4":
|
||||||
|
afrs[i].Name = "MTIPv4"
|
||||||
|
case "MAC/24":
|
||||||
|
afrs[i].Name = "MACFinal24bits"
|
||||||
|
case "MAC/40":
|
||||||
|
afrs[i].Name = "MACFinal40bits"
|
||||||
|
case "IPv6/64":
|
||||||
|
afrs[i].Name = "IPv6Initial64bits"
|
||||||
|
default:
|
||||||
|
n := strings.Index(s, "(")
|
||||||
|
if n > 0 {
|
||||||
|
s = s[:n]
|
||||||
|
}
|
||||||
|
n = strings.Index(s, ":")
|
||||||
|
if n > 0 {
|
||||||
|
s = s[:n]
|
||||||
|
}
|
||||||
|
afrs[i].Name = sr.Replace(s)
|
||||||
|
}
|
||||||
|
afrs[i].Value, _ = strconv.Atoi(afr.Value)
|
||||||
|
}
|
||||||
|
return afrs
|
||||||
|
}
|
||||||
39
vendor/golang.org/x/net/internal/socket/defs_aix.go
generated
vendored
Normal file
39
vendor/golang.org/x/net/internal/socket/defs_aix.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package socket
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
type iovec C.struct_iovec
|
||||||
|
|
||||||
|
type msghdr C.struct_msghdr
|
||||||
|
|
||||||
|
type mmsghdr C.struct_mmsghdr
|
||||||
|
|
||||||
|
type cmsghdr C.struct_cmsghdr
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeofIovec = C.sizeof_struct_iovec
|
||||||
|
sizeofMsghdr = C.sizeof_struct_msghdr
|
||||||
|
sizeofMmsghdr = C.sizeof_struct_mmsghdr
|
||||||
|
sizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||||
|
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
)
|
||||||
36
vendor/golang.org/x/net/internal/socket/defs_darwin.go
generated
vendored
Normal file
36
vendor/golang.org/x/net/internal/socket/defs_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package socket
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
type iovec C.struct_iovec
|
||||||
|
|
||||||
|
type msghdr C.struct_msghdr
|
||||||
|
|
||||||
|
type cmsghdr C.struct_cmsghdr
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeofIovec = C.sizeof_struct_iovec
|
||||||
|
sizeofMsghdr = C.sizeof_struct_msghdr
|
||||||
|
sizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||||
|
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
)
|
||||||
36
vendor/golang.org/x/net/internal/socket/defs_dragonfly.go
generated
vendored
Normal file
36
vendor/golang.org/x/net/internal/socket/defs_dragonfly.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package socket
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
type iovec C.struct_iovec
|
||||||
|
|
||||||
|
type msghdr C.struct_msghdr
|
||||||
|
|
||||||
|
type cmsghdr C.struct_cmsghdr
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeofIovec = C.sizeof_struct_iovec
|
||||||
|
sizeofMsghdr = C.sizeof_struct_msghdr
|
||||||
|
sizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||||
|
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
)
|
||||||
36
vendor/golang.org/x/net/internal/socket/defs_freebsd.go
generated
vendored
Normal file
36
vendor/golang.org/x/net/internal/socket/defs_freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package socket
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
type iovec C.struct_iovec
|
||||||
|
|
||||||
|
type msghdr C.struct_msghdr
|
||||||
|
|
||||||
|
type cmsghdr C.struct_cmsghdr
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeofIovec = C.sizeof_struct_iovec
|
||||||
|
sizeofMsghdr = C.sizeof_struct_msghdr
|
||||||
|
sizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||||
|
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
)
|
||||||
41
vendor/golang.org/x/net/internal/socket/defs_linux.go
generated
vendored
Normal file
41
vendor/golang.org/x/net/internal/socket/defs_linux.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package socket
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <linux/in.h>
|
||||||
|
#include <linux/in6.h>
|
||||||
|
|
||||||
|
#define _GNU_SOURCE
|
||||||
|
#include <sys/socket.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
type iovec C.struct_iovec
|
||||||
|
|
||||||
|
type msghdr C.struct_msghdr
|
||||||
|
|
||||||
|
type mmsghdr C.struct_mmsghdr
|
||||||
|
|
||||||
|
type cmsghdr C.struct_cmsghdr
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeofIovec = C.sizeof_struct_iovec
|
||||||
|
sizeofMsghdr = C.sizeof_struct_msghdr
|
||||||
|
sizeofMmsghdr = C.sizeof_struct_mmsghdr
|
||||||
|
sizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||||
|
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
)
|
||||||
39
vendor/golang.org/x/net/internal/socket/defs_netbsd.go
generated
vendored
Normal file
39
vendor/golang.org/x/net/internal/socket/defs_netbsd.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package socket
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
type iovec C.struct_iovec
|
||||||
|
|
||||||
|
type msghdr C.struct_msghdr
|
||||||
|
|
||||||
|
type mmsghdr C.struct_mmsghdr
|
||||||
|
|
||||||
|
type cmsghdr C.struct_cmsghdr
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeofIovec = C.sizeof_struct_iovec
|
||||||
|
sizeofMsghdr = C.sizeof_struct_msghdr
|
||||||
|
sizeofMmsghdr = C.sizeof_struct_mmsghdr
|
||||||
|
sizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||||
|
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
)
|
||||||
36
vendor/golang.org/x/net/internal/socket/defs_openbsd.go
generated
vendored
Normal file
36
vendor/golang.org/x/net/internal/socket/defs_openbsd.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package socket
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
type iovec C.struct_iovec
|
||||||
|
|
||||||
|
type msghdr C.struct_msghdr
|
||||||
|
|
||||||
|
type cmsghdr C.struct_cmsghdr
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeofIovec = C.sizeof_struct_iovec
|
||||||
|
sizeofMsghdr = C.sizeof_struct_msghdr
|
||||||
|
sizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||||
|
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
)
|
||||||
36
vendor/golang.org/x/net/internal/socket/defs_solaris.go
generated
vendored
Normal file
36
vendor/golang.org/x/net/internal/socket/defs_solaris.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package socket
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
type iovec C.struct_iovec
|
||||||
|
|
||||||
|
type msghdr C.struct_msghdr
|
||||||
|
|
||||||
|
type cmsghdr C.struct_cmsghdr
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeofIovec = C.sizeof_struct_iovec
|
||||||
|
sizeofMsghdr = C.sizeof_struct_msghdr
|
||||||
|
sizeofCmsghdr = C.sizeof_struct_cmsghdr
|
||||||
|
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
)
|
||||||
39
vendor/golang.org/x/net/ipv4/defs_aix.go
generated
vendored
Normal file
39
vendor/golang.org/x/net/ipv4/defs_aix.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
|
||||||
|
package ipv4
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIP_OPTIONS = C.IP_OPTIONS
|
||||||
|
sysIP_HDRINCL = C.IP_HDRINCL
|
||||||
|
sysIP_TOS = C.IP_TOS
|
||||||
|
sysIP_TTL = C.IP_TTL
|
||||||
|
sysIP_RECVOPTS = C.IP_RECVOPTS
|
||||||
|
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
|
||||||
|
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
|
||||||
|
sysIP_RETOPTS = C.IP_RETOPTS
|
||||||
|
// IP_RECVIF is defined on AIX but doesn't work.
|
||||||
|
// IP_RECVINTERFACE must be used instead.
|
||||||
|
sysIP_RECVIF = C.IP_RECVINTERFACE
|
||||||
|
sysIP_RECVTTL = C.IP_RECVTTL
|
||||||
|
|
||||||
|
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
|
||||||
|
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
|
||||||
|
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
|
||||||
|
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
|
||||||
|
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
|
||||||
|
|
||||||
|
sizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||||
|
)
|
||||||
|
|
||||||
|
type ipMreq C.struct_ip_mreq
|
||||||
77
vendor/golang.org/x/net/ipv4/defs_darwin.go
generated
vendored
Normal file
77
vendor/golang.org/x/net/ipv4/defs_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
|
||||||
|
package ipv4
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIP_OPTIONS = C.IP_OPTIONS
|
||||||
|
sysIP_HDRINCL = C.IP_HDRINCL
|
||||||
|
sysIP_TOS = C.IP_TOS
|
||||||
|
sysIP_TTL = C.IP_TTL
|
||||||
|
sysIP_RECVOPTS = C.IP_RECVOPTS
|
||||||
|
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
|
||||||
|
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
|
||||||
|
sysIP_RETOPTS = C.IP_RETOPTS
|
||||||
|
sysIP_RECVIF = C.IP_RECVIF
|
||||||
|
sysIP_STRIPHDR = C.IP_STRIPHDR
|
||||||
|
sysIP_RECVTTL = C.IP_RECVTTL
|
||||||
|
sysIP_BOUND_IF = C.IP_BOUND_IF
|
||||||
|
sysIP_PKTINFO = C.IP_PKTINFO
|
||||||
|
sysIP_RECVPKTINFO = C.IP_RECVPKTINFO
|
||||||
|
|
||||||
|
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
|
||||||
|
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
|
||||||
|
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
|
||||||
|
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
|
||||||
|
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
|
||||||
|
sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
|
||||||
|
sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX
|
||||||
|
sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
|
||||||
|
sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
|
||||||
|
sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
|
||||||
|
sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
|
||||||
|
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
|
||||||
|
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
|
||||||
|
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
|
||||||
|
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
|
||||||
|
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
|
||||||
|
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
|
||||||
|
|
||||||
|
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
sizeofInetPktinfo = C.sizeof_struct_in_pktinfo
|
||||||
|
|
||||||
|
sizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||||
|
sizeofIPMreqn = C.sizeof_struct_ip_mreqn
|
||||||
|
sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
|
||||||
|
sizeofGroupReq = C.sizeof_struct_group_req
|
||||||
|
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
|
||||||
|
)
|
||||||
|
|
||||||
|
type sockaddrStorage C.struct_sockaddr_storage
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type inetPktinfo C.struct_in_pktinfo
|
||||||
|
|
||||||
|
type ipMreq C.struct_ip_mreq
|
||||||
|
|
||||||
|
type ipMreqn C.struct_ip_mreqn
|
||||||
|
|
||||||
|
type ipMreqSource C.struct_ip_mreq_source
|
||||||
|
|
||||||
|
type groupReq C.struct_group_req
|
||||||
|
|
||||||
|
type groupSourceReq C.struct_group_source_req
|
||||||
38
vendor/golang.org/x/net/ipv4/defs_dragonfly.go
generated
vendored
Normal file
38
vendor/golang.org/x/net/ipv4/defs_dragonfly.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
|
||||||
|
package ipv4
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIP_OPTIONS = C.IP_OPTIONS
|
||||||
|
sysIP_HDRINCL = C.IP_HDRINCL
|
||||||
|
sysIP_TOS = C.IP_TOS
|
||||||
|
sysIP_TTL = C.IP_TTL
|
||||||
|
sysIP_RECVOPTS = C.IP_RECVOPTS
|
||||||
|
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
|
||||||
|
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
|
||||||
|
sysIP_RETOPTS = C.IP_RETOPTS
|
||||||
|
sysIP_RECVIF = C.IP_RECVIF
|
||||||
|
sysIP_RECVTTL = C.IP_RECVTTL
|
||||||
|
|
||||||
|
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
|
||||||
|
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
|
||||||
|
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
|
||||||
|
sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
|
||||||
|
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
|
||||||
|
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
|
||||||
|
|
||||||
|
sizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||||
|
)
|
||||||
|
|
||||||
|
type ipMreq C.struct_ip_mreq
|
||||||
75
vendor/golang.org/x/net/ipv4/defs_freebsd.go
generated
vendored
Normal file
75
vendor/golang.org/x/net/ipv4/defs_freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
|
||||||
|
package ipv4
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIP_OPTIONS = C.IP_OPTIONS
|
||||||
|
sysIP_HDRINCL = C.IP_HDRINCL
|
||||||
|
sysIP_TOS = C.IP_TOS
|
||||||
|
sysIP_TTL = C.IP_TTL
|
||||||
|
sysIP_RECVOPTS = C.IP_RECVOPTS
|
||||||
|
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
|
||||||
|
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
|
||||||
|
sysIP_SENDSRCADDR = C.IP_SENDSRCADDR
|
||||||
|
sysIP_RETOPTS = C.IP_RETOPTS
|
||||||
|
sysIP_RECVIF = C.IP_RECVIF
|
||||||
|
sysIP_ONESBCAST = C.IP_ONESBCAST
|
||||||
|
sysIP_BINDANY = C.IP_BINDANY
|
||||||
|
sysIP_RECVTTL = C.IP_RECVTTL
|
||||||
|
sysIP_MINTTL = C.IP_MINTTL
|
||||||
|
sysIP_DONTFRAG = C.IP_DONTFRAG
|
||||||
|
sysIP_RECVTOS = C.IP_RECVTOS
|
||||||
|
|
||||||
|
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
|
||||||
|
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
|
||||||
|
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
|
||||||
|
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
|
||||||
|
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
|
||||||
|
sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
|
||||||
|
sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
|
||||||
|
sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
|
||||||
|
sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
|
||||||
|
sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
|
||||||
|
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
|
||||||
|
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
|
||||||
|
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
|
||||||
|
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
|
||||||
|
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
|
||||||
|
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
|
||||||
|
|
||||||
|
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
|
||||||
|
sizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||||
|
sizeofIPMreqn = C.sizeof_struct_ip_mreqn
|
||||||
|
sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
|
||||||
|
sizeofGroupReq = C.sizeof_struct_group_req
|
||||||
|
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
|
||||||
|
)
|
||||||
|
|
||||||
|
type sockaddrStorage C.struct_sockaddr_storage
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type ipMreq C.struct_ip_mreq
|
||||||
|
|
||||||
|
type ipMreqn C.struct_ip_mreqn
|
||||||
|
|
||||||
|
type ipMreqSource C.struct_ip_mreq_source
|
||||||
|
|
||||||
|
type groupReq C.struct_group_req
|
||||||
|
|
||||||
|
type groupSourceReq C.struct_group_source_req
|
||||||
122
vendor/golang.org/x/net/ipv4/defs_linux.go
generated
vendored
Normal file
122
vendor/golang.org/x/net/ipv4/defs_linux.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
|
||||||
|
package ipv4
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <time.h>
|
||||||
|
|
||||||
|
#include <linux/errqueue.h>
|
||||||
|
#include <linux/icmp.h>
|
||||||
|
#include <linux/in.h>
|
||||||
|
#include <linux/filter.h>
|
||||||
|
#include <sys/socket.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIP_TOS = C.IP_TOS
|
||||||
|
sysIP_TTL = C.IP_TTL
|
||||||
|
sysIP_HDRINCL = C.IP_HDRINCL
|
||||||
|
sysIP_OPTIONS = C.IP_OPTIONS
|
||||||
|
sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT
|
||||||
|
sysIP_RECVOPTS = C.IP_RECVOPTS
|
||||||
|
sysIP_RETOPTS = C.IP_RETOPTS
|
||||||
|
sysIP_PKTINFO = C.IP_PKTINFO
|
||||||
|
sysIP_PKTOPTIONS = C.IP_PKTOPTIONS
|
||||||
|
sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER
|
||||||
|
sysIP_RECVERR = C.IP_RECVERR
|
||||||
|
sysIP_RECVTTL = C.IP_RECVTTL
|
||||||
|
sysIP_RECVTOS = C.IP_RECVTOS
|
||||||
|
sysIP_MTU = C.IP_MTU
|
||||||
|
sysIP_FREEBIND = C.IP_FREEBIND
|
||||||
|
sysIP_TRANSPARENT = C.IP_TRANSPARENT
|
||||||
|
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
|
||||||
|
sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR
|
||||||
|
sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR
|
||||||
|
sysIP_MINTTL = C.IP_MINTTL
|
||||||
|
sysIP_NODEFRAG = C.IP_NODEFRAG
|
||||||
|
sysIP_UNICAST_IF = C.IP_UNICAST_IF
|
||||||
|
|
||||||
|
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
|
||||||
|
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
|
||||||
|
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
|
||||||
|
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
|
||||||
|
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
|
||||||
|
sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
|
||||||
|
sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
|
||||||
|
sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
|
||||||
|
sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
|
||||||
|
sysIP_MSFILTER = C.IP_MSFILTER
|
||||||
|
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
|
||||||
|
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
|
||||||
|
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
|
||||||
|
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
|
||||||
|
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
|
||||||
|
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
|
||||||
|
sysMCAST_MSFILTER = C.MCAST_MSFILTER
|
||||||
|
sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL
|
||||||
|
|
||||||
|
//sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT
|
||||||
|
//sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT
|
||||||
|
//sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO
|
||||||
|
//sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE
|
||||||
|
//sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE
|
||||||
|
//sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT
|
||||||
|
|
||||||
|
sysICMP_FILTER = C.ICMP_FILTER
|
||||||
|
|
||||||
|
sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE
|
||||||
|
sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL
|
||||||
|
sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP
|
||||||
|
sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6
|
||||||
|
sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS
|
||||||
|
sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING
|
||||||
|
|
||||||
|
sysSOL_SOCKET = C.SOL_SOCKET
|
||||||
|
sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER
|
||||||
|
|
||||||
|
sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
sizeofInetPktinfo = C.sizeof_struct_in_pktinfo
|
||||||
|
sizeofSockExtendedErr = C.sizeof_struct_sock_extended_err
|
||||||
|
|
||||||
|
sizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||||
|
sizeofIPMreqn = C.sizeof_struct_ip_mreqn
|
||||||
|
sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
|
||||||
|
sizeofGroupReq = C.sizeof_struct_group_req
|
||||||
|
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
|
||||||
|
|
||||||
|
sizeofICMPFilter = C.sizeof_struct_icmp_filter
|
||||||
|
|
||||||
|
sizeofSockFprog = C.sizeof_struct_sock_fprog
|
||||||
|
)
|
||||||
|
|
||||||
|
type kernelSockaddrStorage C.struct___kernel_sockaddr_storage
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type inetPktinfo C.struct_in_pktinfo
|
||||||
|
|
||||||
|
type sockExtendedErr C.struct_sock_extended_err
|
||||||
|
|
||||||
|
type ipMreq C.struct_ip_mreq
|
||||||
|
|
||||||
|
type ipMreqn C.struct_ip_mreqn
|
||||||
|
|
||||||
|
type ipMreqSource C.struct_ip_mreq_source
|
||||||
|
|
||||||
|
type groupReq C.struct_group_req
|
||||||
|
|
||||||
|
type groupSourceReq C.struct_group_source_req
|
||||||
|
|
||||||
|
type icmpFilter C.struct_icmp_filter
|
||||||
|
|
||||||
|
type sockFProg C.struct_sock_fprog
|
||||||
|
|
||||||
|
type sockFilter C.struct_sock_filter
|
||||||
37
vendor/golang.org/x/net/ipv4/defs_netbsd.go
generated
vendored
Normal file
37
vendor/golang.org/x/net/ipv4/defs_netbsd.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
|
||||||
|
package ipv4
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIP_OPTIONS = C.IP_OPTIONS
|
||||||
|
sysIP_HDRINCL = C.IP_HDRINCL
|
||||||
|
sysIP_TOS = C.IP_TOS
|
||||||
|
sysIP_TTL = C.IP_TTL
|
||||||
|
sysIP_RECVOPTS = C.IP_RECVOPTS
|
||||||
|
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
|
||||||
|
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
|
||||||
|
sysIP_RETOPTS = C.IP_RETOPTS
|
||||||
|
sysIP_RECVIF = C.IP_RECVIF
|
||||||
|
sysIP_RECVTTL = C.IP_RECVTTL
|
||||||
|
|
||||||
|
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
|
||||||
|
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
|
||||||
|
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
|
||||||
|
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
|
||||||
|
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
|
||||||
|
|
||||||
|
sizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||||
|
)
|
||||||
|
|
||||||
|
type ipMreq C.struct_ip_mreq
|
||||||
37
vendor/golang.org/x/net/ipv4/defs_openbsd.go
generated
vendored
Normal file
37
vendor/golang.org/x/net/ipv4/defs_openbsd.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
|
||||||
|
package ipv4
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIP_OPTIONS = C.IP_OPTIONS
|
||||||
|
sysIP_HDRINCL = C.IP_HDRINCL
|
||||||
|
sysIP_TOS = C.IP_TOS
|
||||||
|
sysIP_TTL = C.IP_TTL
|
||||||
|
sysIP_RECVOPTS = C.IP_RECVOPTS
|
||||||
|
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
|
||||||
|
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
|
||||||
|
sysIP_RETOPTS = C.IP_RETOPTS
|
||||||
|
sysIP_RECVIF = C.IP_RECVIF
|
||||||
|
sysIP_RECVTTL = C.IP_RECVTTL
|
||||||
|
|
||||||
|
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
|
||||||
|
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
|
||||||
|
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
|
||||||
|
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
|
||||||
|
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
|
||||||
|
|
||||||
|
sizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||||
|
)
|
||||||
|
|
||||||
|
type ipMreq C.struct_ip_mreq
|
||||||
84
vendor/golang.org/x/net/ipv4/defs_solaris.go
generated
vendored
Normal file
84
vendor/golang.org/x/net/ipv4/defs_solaris.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in_addr [4]byte /* in_addr */
|
||||||
|
|
||||||
|
package ipv4
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIP_OPTIONS = C.IP_OPTIONS
|
||||||
|
sysIP_HDRINCL = C.IP_HDRINCL
|
||||||
|
sysIP_TOS = C.IP_TOS
|
||||||
|
sysIP_TTL = C.IP_TTL
|
||||||
|
sysIP_RECVOPTS = C.IP_RECVOPTS
|
||||||
|
sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
|
||||||
|
sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
|
||||||
|
sysIP_RETOPTS = C.IP_RETOPTS
|
||||||
|
sysIP_RECVIF = C.IP_RECVIF
|
||||||
|
sysIP_RECVSLLA = C.IP_RECVSLLA
|
||||||
|
sysIP_RECVTTL = C.IP_RECVTTL
|
||||||
|
|
||||||
|
sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
|
||||||
|
sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
|
||||||
|
sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
|
||||||
|
sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
|
||||||
|
sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
|
||||||
|
sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
|
||||||
|
sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
|
||||||
|
sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
|
||||||
|
sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
|
||||||
|
sysIP_NEXTHOP = C.IP_NEXTHOP
|
||||||
|
|
||||||
|
sysIP_PKTINFO = C.IP_PKTINFO
|
||||||
|
sysIP_RECVPKTINFO = C.IP_RECVPKTINFO
|
||||||
|
sysIP_DONTFRAG = C.IP_DONTFRAG
|
||||||
|
|
||||||
|
sysIP_BOUND_IF = C.IP_BOUND_IF
|
||||||
|
sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC
|
||||||
|
sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL
|
||||||
|
sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF
|
||||||
|
|
||||||
|
sysIP_REUSEADDR = C.IP_REUSEADDR
|
||||||
|
sysIP_DONTROUTE = C.IP_DONTROUTE
|
||||||
|
sysIP_BROADCAST = C.IP_BROADCAST
|
||||||
|
|
||||||
|
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
|
||||||
|
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
|
||||||
|
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
|
||||||
|
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
|
||||||
|
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
|
||||||
|
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
|
||||||
|
|
||||||
|
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
|
||||||
|
sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
|
||||||
|
sizeofInetPktinfo = C.sizeof_struct_in_pktinfo
|
||||||
|
|
||||||
|
sizeofIPMreq = C.sizeof_struct_ip_mreq
|
||||||
|
sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
|
||||||
|
sizeofGroupReq = C.sizeof_struct_group_req
|
||||||
|
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
|
||||||
|
)
|
||||||
|
|
||||||
|
type sockaddrStorage C.struct_sockaddr_storage
|
||||||
|
|
||||||
|
type sockaddrInet C.struct_sockaddr_in
|
||||||
|
|
||||||
|
type inetPktinfo C.struct_in_pktinfo
|
||||||
|
|
||||||
|
type ipMreq C.struct_ip_mreq
|
||||||
|
|
||||||
|
type ipMreqSource C.struct_ip_mreq_source
|
||||||
|
|
||||||
|
type groupReq C.struct_group_req
|
||||||
|
|
||||||
|
type groupSourceReq C.struct_group_source_req
|
||||||
199
vendor/golang.org/x/net/ipv4/gen.go
generated
vendored
Normal file
199
vendor/golang.org/x/net/ipv4/gen.go
generated
vendored
Normal file
@@ -0,0 +1,199 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
//go:generate go run gen.go
|
||||||
|
|
||||||
|
// This program generates system adaptation constants and types,
|
||||||
|
// internet protocol constants and tables by reading template files
|
||||||
|
// and IANA protocol registries.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"go/format"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if err := genzsys(); err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if err := geniana(); err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func genzsys() error {
|
||||||
|
defs := "defs_" + runtime.GOOS + ".go"
|
||||||
|
f, err := os.Open(defs)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.Close()
|
||||||
|
cmd := exec.Command("go", "tool", "cgo", "-godefs", defs)
|
||||||
|
b, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
b, err = format.Source(b)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
zsys := "zsys_" + runtime.GOOS + ".go"
|
||||||
|
switch runtime.GOOS {
|
||||||
|
case "freebsd", "linux":
|
||||||
|
zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go"
|
||||||
|
}
|
||||||
|
if err := ioutil.WriteFile(zsys, b, 0644); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var registries = []struct {
|
||||||
|
url string
|
||||||
|
parse func(io.Writer, io.Reader) error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml",
|
||||||
|
parseICMPv4Parameters,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func geniana() error {
|
||||||
|
var bb bytes.Buffer
|
||||||
|
fmt.Fprintf(&bb, "// go generate gen.go\n")
|
||||||
|
fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n")
|
||||||
|
fmt.Fprintf(&bb, "package ipv4\n\n")
|
||||||
|
for _, r := range registries {
|
||||||
|
resp, err := http.Get(r.url)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url)
|
||||||
|
}
|
||||||
|
if err := r.parse(&bb, resp.Body); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&bb, "\n")
|
||||||
|
}
|
||||||
|
b, err := format.Source(bb.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := ioutil.WriteFile("iana.go", b, 0644); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseICMPv4Parameters(w io.Writer, r io.Reader) error {
|
||||||
|
dec := xml.NewDecoder(r)
|
||||||
|
var icp icmpv4Parameters
|
||||||
|
if err := dec.Decode(&icp); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
prs := icp.escape()
|
||||||
|
fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
|
||||||
|
fmt.Fprintf(w, "const (\n")
|
||||||
|
for _, pr := range prs {
|
||||||
|
if pr.Descr == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value)
|
||||||
|
fmt.Fprintf(w, "// %s\n", pr.OrigDescr)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, ")\n\n")
|
||||||
|
fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
|
||||||
|
fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n")
|
||||||
|
for _, pr := range prs {
|
||||||
|
if pr.Descr == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr))
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "}\n")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type icmpv4Parameters struct {
|
||||||
|
XMLName xml.Name `xml:"registry"`
|
||||||
|
Title string `xml:"title"`
|
||||||
|
Updated string `xml:"updated"`
|
||||||
|
Registries []struct {
|
||||||
|
Title string `xml:"title"`
|
||||||
|
Records []struct {
|
||||||
|
Value string `xml:"value"`
|
||||||
|
Descr string `xml:"description"`
|
||||||
|
} `xml:"record"`
|
||||||
|
} `xml:"registry"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type canonICMPv4ParamRecord struct {
|
||||||
|
OrigDescr string
|
||||||
|
Descr string
|
||||||
|
Value int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord {
|
||||||
|
id := -1
|
||||||
|
for i, r := range icp.Registries {
|
||||||
|
if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") {
|
||||||
|
id = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if id < 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records))
|
||||||
|
sr := strings.NewReplacer(
|
||||||
|
"Messages", "",
|
||||||
|
"Message", "",
|
||||||
|
"ICMP", "",
|
||||||
|
"+", "P",
|
||||||
|
"-", "",
|
||||||
|
"/", "",
|
||||||
|
".", "",
|
||||||
|
" ", "",
|
||||||
|
)
|
||||||
|
for i, pr := range icp.Registries[id].Records {
|
||||||
|
if strings.Contains(pr.Descr, "Reserved") ||
|
||||||
|
strings.Contains(pr.Descr, "Unassigned") ||
|
||||||
|
strings.Contains(pr.Descr, "Deprecated") ||
|
||||||
|
strings.Contains(pr.Descr, "Experiment") ||
|
||||||
|
strings.Contains(pr.Descr, "experiment") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ss := strings.Split(pr.Descr, "\n")
|
||||||
|
if len(ss) > 1 {
|
||||||
|
prs[i].Descr = strings.Join(ss, " ")
|
||||||
|
} else {
|
||||||
|
prs[i].Descr = ss[0]
|
||||||
|
}
|
||||||
|
s := strings.TrimSpace(prs[i].Descr)
|
||||||
|
prs[i].OrigDescr = s
|
||||||
|
prs[i].Descr = sr.Replace(s)
|
||||||
|
prs[i].Value, _ = strconv.Atoi(pr.Value)
|
||||||
|
}
|
||||||
|
return prs
|
||||||
|
}
|
||||||
82
vendor/golang.org/x/net/ipv6/defs_aix.go
generated
vendored
Normal file
82
vendor/golang.org/x/net/ipv6/defs_aix.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
// Copyright 2019 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package ipv6
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/param.h>
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
#include <netinet/icmp6.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
|
||||||
|
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
|
||||||
|
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
|
||||||
|
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
|
||||||
|
sysICMP6_FILTER = C.ICMP6_FILTER
|
||||||
|
|
||||||
|
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
|
||||||
|
sysIPV6_V6ONLY = C.IPV6_V6ONLY
|
||||||
|
|
||||||
|
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
|
||||||
|
|
||||||
|
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
|
||||||
|
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
|
||||||
|
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
|
||||||
|
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
|
||||||
|
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
|
||||||
|
|
||||||
|
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
|
||||||
|
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
|
||||||
|
sysIPV6_PATHMTU = C.IPV6_PATHMTU
|
||||||
|
|
||||||
|
sysIPV6_PKTINFO = C.IPV6_PKTINFO
|
||||||
|
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
|
||||||
|
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
|
||||||
|
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
|
||||||
|
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
|
||||||
|
sysIPV6_RTHDR = C.IPV6_RTHDR
|
||||||
|
|
||||||
|
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
|
||||||
|
|
||||||
|
sysIPV6_TCLASS = C.IPV6_TCLASS
|
||||||
|
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
|
||||||
|
|
||||||
|
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||||
|
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||||
|
sizeofGroupReq = C.sizeof_struct_group_req
|
||||||
|
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
|
||||||
|
|
||||||
|
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||||
|
)
|
||||||
|
|
||||||
|
type sockaddrStorage C.struct_sockaddr_storage
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
type inet6Pktinfo C.struct_in6_pktinfo
|
||||||
|
|
||||||
|
type ipv6Mtuinfo C.struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
type ipv6Mreq C.struct_ipv6_mreq
|
||||||
|
|
||||||
|
type icmpv6Filter C.struct_icmp6_filter
|
||||||
|
|
||||||
|
type groupReq C.struct_group_req
|
||||||
|
|
||||||
|
type groupSourceReq C.struct_group_source_req
|
||||||
112
vendor/golang.org/x/net/ipv6/defs_darwin.go
generated
vendored
Normal file
112
vendor/golang.org/x/net/ipv6/defs_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package ipv6
|
||||||
|
|
||||||
|
/*
|
||||||
|
#define __APPLE_USE_RFC_3542
|
||||||
|
#include <netinet/in.h>
|
||||||
|
#include <netinet/icmp6.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
|
||||||
|
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
|
||||||
|
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
|
||||||
|
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
|
||||||
|
|
||||||
|
sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
|
||||||
|
sysICMP6_FILTER = C.ICMP6_FILTER
|
||||||
|
sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO
|
||||||
|
sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT
|
||||||
|
sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP
|
||||||
|
sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS
|
||||||
|
sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS
|
||||||
|
sysIPV6_2292RTHDR = C.IPV6_2292RTHDR
|
||||||
|
|
||||||
|
sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS
|
||||||
|
|
||||||
|
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
|
||||||
|
sysIPV6_V6ONLY = C.IPV6_V6ONLY
|
||||||
|
|
||||||
|
sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
|
||||||
|
|
||||||
|
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
|
||||||
|
sysIPV6_TCLASS = C.IPV6_TCLASS
|
||||||
|
|
||||||
|
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
|
||||||
|
|
||||||
|
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
|
||||||
|
|
||||||
|
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
|
||||||
|
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
|
||||||
|
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
|
||||||
|
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
|
||||||
|
|
||||||
|
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
|
||||||
|
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
|
||||||
|
|
||||||
|
sysIPV6_PATHMTU = C.IPV6_PATHMTU
|
||||||
|
|
||||||
|
sysIPV6_PKTINFO = C.IPV6_PKTINFO
|
||||||
|
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
|
||||||
|
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
|
||||||
|
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
|
||||||
|
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
|
||||||
|
sysIPV6_RTHDR = C.IPV6_RTHDR
|
||||||
|
|
||||||
|
sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
|
||||||
|
|
||||||
|
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
|
||||||
|
|
||||||
|
sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
|
||||||
|
|
||||||
|
sysIPV6_MSFILTER = C.IPV6_MSFILTER
|
||||||
|
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
|
||||||
|
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
|
||||||
|
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
|
||||||
|
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
|
||||||
|
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
|
||||||
|
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
|
||||||
|
|
||||||
|
sysIPV6_BOUND_IF = C.IPV6_BOUND_IF
|
||||||
|
|
||||||
|
sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
|
||||||
|
sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
|
||||||
|
sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
|
||||||
|
|
||||||
|
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||||
|
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||||
|
sizeofGroupReq = C.sizeof_struct_group_req
|
||||||
|
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
|
||||||
|
|
||||||
|
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||||
|
)
|
||||||
|
|
||||||
|
type sockaddrStorage C.struct_sockaddr_storage
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
type inet6Pktinfo C.struct_in6_pktinfo
|
||||||
|
|
||||||
|
type ipv6Mtuinfo C.struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
type ipv6Mreq C.struct_ipv6_mreq
|
||||||
|
|
||||||
|
type icmpv6Filter C.struct_icmp6_filter
|
||||||
|
|
||||||
|
type groupReq C.struct_group_req
|
||||||
|
|
||||||
|
type groupSourceReq C.struct_group_source_req
|
||||||
84
vendor/golang.org/x/net/ipv6/defs_dragonfly.go
generated
vendored
Normal file
84
vendor/golang.org/x/net/ipv6/defs_dragonfly.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package ipv6
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/param.h>
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
#include <netinet/icmp6.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
|
||||||
|
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
|
||||||
|
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
|
||||||
|
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
|
||||||
|
sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
|
||||||
|
sysICMP6_FILTER = C.ICMP6_FILTER
|
||||||
|
|
||||||
|
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
|
||||||
|
sysIPV6_V6ONLY = C.IPV6_V6ONLY
|
||||||
|
|
||||||
|
sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
|
||||||
|
|
||||||
|
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
|
||||||
|
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
|
||||||
|
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
|
||||||
|
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
|
||||||
|
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
|
||||||
|
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
|
||||||
|
|
||||||
|
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
|
||||||
|
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
|
||||||
|
|
||||||
|
sysIPV6_PATHMTU = C.IPV6_PATHMTU
|
||||||
|
|
||||||
|
sysIPV6_PKTINFO = C.IPV6_PKTINFO
|
||||||
|
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
|
||||||
|
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
|
||||||
|
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
|
||||||
|
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
|
||||||
|
sysIPV6_RTHDR = C.IPV6_RTHDR
|
||||||
|
|
||||||
|
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
|
||||||
|
|
||||||
|
sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
|
||||||
|
|
||||||
|
sysIPV6_TCLASS = C.IPV6_TCLASS
|
||||||
|
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
|
||||||
|
|
||||||
|
sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
|
||||||
|
|
||||||
|
sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
|
||||||
|
sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
|
||||||
|
sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
|
||||||
|
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||||
|
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||||
|
|
||||||
|
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||||
|
)
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
type inet6Pktinfo C.struct_in6_pktinfo
|
||||||
|
|
||||||
|
type ipv6Mtuinfo C.struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
type ipv6Mreq C.struct_ipv6_mreq
|
||||||
|
|
||||||
|
type icmpv6Filter C.struct_icmp6_filter
|
||||||
105
vendor/golang.org/x/net/ipv6/defs_freebsd.go
generated
vendored
Normal file
105
vendor/golang.org/x/net/ipv6/defs_freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package ipv6
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/param.h>
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
#include <netinet/icmp6.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
|
||||||
|
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
|
||||||
|
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
|
||||||
|
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
|
||||||
|
sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
|
||||||
|
sysICMP6_FILTER = C.ICMP6_FILTER
|
||||||
|
|
||||||
|
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
|
||||||
|
sysIPV6_V6ONLY = C.IPV6_V6ONLY
|
||||||
|
|
||||||
|
sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
|
||||||
|
|
||||||
|
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
|
||||||
|
|
||||||
|
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
|
||||||
|
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
|
||||||
|
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
|
||||||
|
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
|
||||||
|
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
|
||||||
|
|
||||||
|
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
|
||||||
|
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
|
||||||
|
|
||||||
|
sysIPV6_PATHMTU = C.IPV6_PATHMTU
|
||||||
|
|
||||||
|
sysIPV6_PKTINFO = C.IPV6_PKTINFO
|
||||||
|
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
|
||||||
|
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
|
||||||
|
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
|
||||||
|
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
|
||||||
|
sysIPV6_RTHDR = C.IPV6_RTHDR
|
||||||
|
|
||||||
|
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
|
||||||
|
|
||||||
|
sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
|
||||||
|
|
||||||
|
sysIPV6_TCLASS = C.IPV6_TCLASS
|
||||||
|
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
|
||||||
|
|
||||||
|
sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
|
||||||
|
|
||||||
|
sysIPV6_BINDANY = C.IPV6_BINDANY
|
||||||
|
|
||||||
|
sysIPV6_MSFILTER = C.IPV6_MSFILTER
|
||||||
|
|
||||||
|
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
|
||||||
|
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
|
||||||
|
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
|
||||||
|
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
|
||||||
|
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
|
||||||
|
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
|
||||||
|
|
||||||
|
sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
|
||||||
|
sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
|
||||||
|
sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
|
||||||
|
|
||||||
|
sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||||
|
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||||
|
sizeofGroupReq = C.sizeof_struct_group_req
|
||||||
|
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
|
||||||
|
|
||||||
|
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||||
|
)
|
||||||
|
|
||||||
|
type sockaddrStorage C.struct_sockaddr_storage
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
type inet6Pktinfo C.struct_in6_pktinfo
|
||||||
|
|
||||||
|
type ipv6Mtuinfo C.struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
type ipv6Mreq C.struct_ipv6_mreq
|
||||||
|
|
||||||
|
type groupReq C.struct_group_req
|
||||||
|
|
||||||
|
type groupSourceReq C.struct_group_source_req
|
||||||
|
|
||||||
|
type icmpv6Filter C.struct_icmp6_filter
|
||||||
147
vendor/golang.org/x/net/ipv6/defs_linux.go
generated
vendored
Normal file
147
vendor/golang.org/x/net/ipv6/defs_linux.go
generated
vendored
Normal file
@@ -0,0 +1,147 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package ipv6
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <linux/in.h>
|
||||||
|
#include <linux/in6.h>
|
||||||
|
#include <linux/ipv6.h>
|
||||||
|
#include <linux/icmpv6.h>
|
||||||
|
#include <linux/filter.h>
|
||||||
|
#include <sys/socket.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIPV6_ADDRFORM = C.IPV6_ADDRFORM
|
||||||
|
sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO
|
||||||
|
sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS
|
||||||
|
sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS
|
||||||
|
sysIPV6_2292RTHDR = C.IPV6_2292RTHDR
|
||||||
|
sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS
|
||||||
|
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
|
||||||
|
sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT
|
||||||
|
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
|
||||||
|
sysIPV6_FLOWINFO = C.IPV6_FLOWINFO
|
||||||
|
|
||||||
|
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
|
||||||
|
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
|
||||||
|
sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP
|
||||||
|
sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP
|
||||||
|
sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
|
||||||
|
sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
|
||||||
|
sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
|
||||||
|
sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
|
||||||
|
sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
|
||||||
|
sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
|
||||||
|
sysMCAST_MSFILTER = C.MCAST_MSFILTER
|
||||||
|
sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT
|
||||||
|
sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER
|
||||||
|
sysIPV6_MTU = C.IPV6_MTU
|
||||||
|
sysIPV6_RECVERR = C.IPV6_RECVERR
|
||||||
|
sysIPV6_V6ONLY = C.IPV6_V6ONLY
|
||||||
|
sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST
|
||||||
|
sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST
|
||||||
|
|
||||||
|
//sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT
|
||||||
|
//sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT
|
||||||
|
//sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO
|
||||||
|
//sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE
|
||||||
|
//sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE
|
||||||
|
//sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT
|
||||||
|
|
||||||
|
sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR
|
||||||
|
sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND
|
||||||
|
|
||||||
|
sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
|
||||||
|
sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY
|
||||||
|
|
||||||
|
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
|
||||||
|
sysIPV6_PKTINFO = C.IPV6_PKTINFO
|
||||||
|
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
|
||||||
|
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
|
||||||
|
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
|
||||||
|
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
|
||||||
|
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
|
||||||
|
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
|
||||||
|
sysIPV6_RTHDR = C.IPV6_RTHDR
|
||||||
|
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
|
||||||
|
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
|
||||||
|
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
|
||||||
|
sysIPV6_PATHMTU = C.IPV6_PATHMTU
|
||||||
|
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
|
||||||
|
|
||||||
|
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
|
||||||
|
sysIPV6_TCLASS = C.IPV6_TCLASS
|
||||||
|
|
||||||
|
sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES
|
||||||
|
|
||||||
|
sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP
|
||||||
|
sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC
|
||||||
|
sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT
|
||||||
|
sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA
|
||||||
|
sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME
|
||||||
|
sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA
|
||||||
|
sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA
|
||||||
|
|
||||||
|
sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT
|
||||||
|
|
||||||
|
sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR
|
||||||
|
sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR
|
||||||
|
sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT
|
||||||
|
sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF
|
||||||
|
|
||||||
|
sysICMPV6_FILTER = C.ICMPV6_FILTER
|
||||||
|
|
||||||
|
sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK
|
||||||
|
sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS
|
||||||
|
sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS
|
||||||
|
sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY
|
||||||
|
|
||||||
|
sysSOL_SOCKET = C.SOL_SOCKET
|
||||||
|
sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER
|
||||||
|
|
||||||
|
sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||||
|
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
|
||||||
|
sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req
|
||||||
|
|
||||||
|
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||||
|
sizeofGroupReq = C.sizeof_struct_group_req
|
||||||
|
sizeofGroupSourceReq = C.sizeof_struct_group_source_req
|
||||||
|
|
||||||
|
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||||
|
|
||||||
|
sizeofSockFprog = C.sizeof_struct_sock_fprog
|
||||||
|
)
|
||||||
|
|
||||||
|
type kernelSockaddrStorage C.struct___kernel_sockaddr_storage
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
type inet6Pktinfo C.struct_in6_pktinfo
|
||||||
|
|
||||||
|
type ipv6Mtuinfo C.struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
type ipv6FlowlabelReq C.struct_in6_flowlabel_req
|
||||||
|
|
||||||
|
type ipv6Mreq C.struct_ipv6_mreq
|
||||||
|
|
||||||
|
type groupReq C.struct_group_req
|
||||||
|
|
||||||
|
type groupSourceReq C.struct_group_source_req
|
||||||
|
|
||||||
|
type icmpv6Filter C.struct_icmp6_filter
|
||||||
|
|
||||||
|
type sockFProg C.struct_sock_fprog
|
||||||
|
|
||||||
|
type sockFilter C.struct_sock_filter
|
||||||
80
vendor/golang.org/x/net/ipv6/defs_netbsd.go
generated
vendored
Normal file
80
vendor/golang.org/x/net/ipv6/defs_netbsd.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package ipv6
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/param.h>
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
#include <netinet/icmp6.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
|
||||||
|
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
|
||||||
|
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
|
||||||
|
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
|
||||||
|
sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
|
||||||
|
sysICMP6_FILTER = C.ICMP6_FILTER
|
||||||
|
|
||||||
|
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
|
||||||
|
sysIPV6_V6ONLY = C.IPV6_V6ONLY
|
||||||
|
|
||||||
|
sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
|
||||||
|
|
||||||
|
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
|
||||||
|
|
||||||
|
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
|
||||||
|
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
|
||||||
|
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
|
||||||
|
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
|
||||||
|
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
|
||||||
|
|
||||||
|
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
|
||||||
|
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
|
||||||
|
sysIPV6_PATHMTU = C.IPV6_PATHMTU
|
||||||
|
|
||||||
|
sysIPV6_PKTINFO = C.IPV6_PKTINFO
|
||||||
|
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
|
||||||
|
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
|
||||||
|
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
|
||||||
|
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
|
||||||
|
sysIPV6_RTHDR = C.IPV6_RTHDR
|
||||||
|
|
||||||
|
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
|
||||||
|
|
||||||
|
sysIPV6_TCLASS = C.IPV6_TCLASS
|
||||||
|
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
|
||||||
|
|
||||||
|
sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
|
||||||
|
sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
|
||||||
|
sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
|
||||||
|
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||||
|
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||||
|
|
||||||
|
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||||
|
)
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
type inet6Pktinfo C.struct_in6_pktinfo
|
||||||
|
|
||||||
|
type ipv6Mtuinfo C.struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
type ipv6Mreq C.struct_ipv6_mreq
|
||||||
|
|
||||||
|
type icmpv6Filter C.struct_icmp6_filter
|
||||||
89
vendor/golang.org/x/net/ipv6/defs_openbsd.go
generated
vendored
Normal file
89
vendor/golang.org/x/net/ipv6/defs_openbsd.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
// +godefs map struct_in6_addr [16]byte /* in6_addr */
|
||||||
|
|
||||||
|
package ipv6
|
||||||
|
|
||||||
|
/*
|
||||||
|
#include <sys/param.h>
|
||||||
|
#include <sys/socket.h>
|
||||||
|
|
||||||
|
#include <netinet/in.h>
|
||||||
|
#include <netinet/icmp6.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF
|
||||||
|
sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
|
||||||
|
sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
|
||||||
|
sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP
|
||||||
|
sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP
|
||||||
|
sysIPV6_PORTRANGE = C.IPV6_PORTRANGE
|
||||||
|
sysICMP6_FILTER = C.ICMP6_FILTER
|
||||||
|
|
||||||
|
sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
|
||||||
|
sysIPV6_V6ONLY = C.IPV6_V6ONLY
|
||||||
|
|
||||||
|
sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
|
||||||
|
|
||||||
|
sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
|
||||||
|
sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
|
||||||
|
sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
|
||||||
|
sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS
|
||||||
|
sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
|
||||||
|
|
||||||
|
sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
|
||||||
|
sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
|
||||||
|
|
||||||
|
sysIPV6_PATHMTU = C.IPV6_PATHMTU
|
||||||
|
|
||||||
|
sysIPV6_PKTINFO = C.IPV6_PKTINFO
|
||||||
|
sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
|
||||||
|
sysIPV6_NEXTHOP = C.IPV6_NEXTHOP
|
||||||
|
sysIPV6_HOPOPTS = C.IPV6_HOPOPTS
|
||||||
|
sysIPV6_DSTOPTS = C.IPV6_DSTOPTS
|
||||||
|
sysIPV6_RTHDR = C.IPV6_RTHDR
|
||||||
|
|
||||||
|
sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL
|
||||||
|
sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL
|
||||||
|
sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL
|
||||||
|
sysIPSEC6_OUTSA = C.IPSEC6_OUTSA
|
||||||
|
sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
|
||||||
|
|
||||||
|
sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
|
||||||
|
sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL
|
||||||
|
|
||||||
|
sysIPV6_TCLASS = C.IPV6_TCLASS
|
||||||
|
sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
|
||||||
|
sysIPV6_PIPEX = C.IPV6_PIPEX
|
||||||
|
|
||||||
|
sysIPV6_RTABLE = C.IPV6_RTABLE
|
||||||
|
|
||||||
|
sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
|
||||||
|
sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH
|
||||||
|
sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW
|
||||||
|
|
||||||
|
sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
|
||||||
|
sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
|
||||||
|
sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
|
||||||
|
|
||||||
|
sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
|
||||||
|
)
|
||||||
|
|
||||||
|
type sockaddrInet6 C.struct_sockaddr_in6
|
||||||
|
|
||||||
|
type inet6Pktinfo C.struct_in6_pktinfo
|
||||||
|
|
||||||
|
type ipv6Mtuinfo C.struct_ip6_mtuinfo
|
||||||
|
|
||||||
|
type ipv6Mreq C.struct_ipv6_mreq
|
||||||
|
|
||||||
|
type icmpv6Filter C.struct_icmp6_filter
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user