integrate opensearch v1 (#5135)

opensearchv1

Signed-off-by: chengdehao <dehaocheng@kubesphere.io>

Signed-off-by: chengdehao <dehaocheng@kubesphere.io>
Co-authored-by: chengdehao <dehaocheng@kubesphere.io>
This commit is contained in:
Elon Cheng
2022-08-12 06:58:33 -05:00
committed by GitHub
parent 2a31867df1
commit f741bc7943
186 changed files with 47471 additions and 4 deletions

View File

@@ -0,0 +1,338 @@
// SPDX-License-Identifier: Apache-2.0
//
// The OpenSearch Contributors require contributions made to
// this file be licensed under the Apache-2.0 license or a
// compatible open source license.
//
// Modifications Copyright OpenSearch Contributors. See
// GitHub history for details.
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package opensearchtransport
import (
"errors"
"fmt"
"math"
"net/url"
"sort"
"sync"
"time"
)
// Defaults for the exponential resurrect-timeout backoff used by
// statusConnectionPool.scheduleResurrect: the initial timeout, and the
// cutoff applied to the failure-count exponent.
var (
	defaultResurrectTimeoutInitial      = 60 * time.Second
	defaultResurrectTimeoutFactorCutoff = 5
)
// Selector defines the interface for selecting connections from the pool.
//
type Selector interface {
	// Select returns one connection from the given list, or an error.
	Select([]*Connection) (*Connection, error)
}

// ConnectionPool defines the interface for the connection pool.
//
type ConnectionPool interface {
	Next() (*Connection, error)  // Next returns the next available connection.
	OnSuccess(*Connection) error // OnSuccess reports that the connection was successful.
	OnFailure(*Connection) error // OnFailure reports that the connection failed.
	URLs() []*url.URL            // URLs returns the list of URLs of available connections.
}
// Connection represents a connection to a node.
//
// The embedded mutex guards the health-tracking fields (IsDead, DeadSince,
// Failures), which are mutated concurrently by the pool and by resurrect
// timer callbacks.
type Connection struct {
	sync.Mutex

	URL       *url.URL  // Node URL this connection points at.
	IsDead    bool      // Whether the connection is currently considered dead.
	DeadSince time.Time // When the connection first went dead (zero while healthy).
	Failures  int       // Number of failures recorded via markAsDead.

	ID         string                 // Node ID as reported by the cluster.
	Name       string                 // Node name.
	Roles      []string               // Node roles.
	Attributes map[string]interface{} // Node attributes.
}
// singleConnectionPool is the trivial pool used when only one node address is
// configured; it always hands out the same connection and never marks it dead.
type singleConnectionPool struct {
	connection *Connection

	metrics *metrics
}

// statusConnectionPool tracks the health of multiple connections, moving them
// between the live and dead lists as successes and failures are reported.
type statusConnectionPool struct {
	sync.Mutex

	live     []*Connection // List of live connections
	dead     []*Connection // List of dead connections
	selector Selector

	metrics *metrics
}

// roundRobinSelector cycles over the given connections in order.
type roundRobinSelector struct {
	sync.Mutex

	curr int // Index of the current connection
}
// NewConnectionPool creates and returns a default connection pool.
//
// A single address yields a singleConnectionPool (no status tracking); more
// than one yields a statusConnectionPool, defaulting to round-robin selection
// when no selector is supplied.
func NewConnectionPool(conns []*Connection, selector Selector) (ConnectionPool, error) {
	// With exactly one connection there is nothing to select or track.
	if len(conns) == 1 {
		return &singleConnectionPool{connection: conns[0]}, nil
	}

	if selector == nil {
		selector = &roundRobinSelector{curr: -1}
	}

	pool := statusConnectionPool{live: conns, selector: selector}
	return &pool, nil
}
// Next returns the connection from pool.
//
func (cp *singleConnectionPool) Next() (*Connection, error) {
	return cp.connection, nil
}

// OnSuccess is a no-op for single connection pool.
func (cp *singleConnectionPool) OnSuccess(c *Connection) error {
	return nil
}

// OnFailure is a no-op for single connection pool.
func (cp *singleConnectionPool) OnFailure(c *Connection) error {
	return nil
}

// URLs returns the list of URLs of available connections.
func (cp *singleConnectionPool) URLs() []*url.URL {
	return []*url.URL{cp.connection.URL}
}

// connections returns the single underlying connection, for metrics.
func (cp *singleConnectionPool) connections() []*Connection {
	return []*Connection{cp.connection}
}
// Next returns a connection from pool, or an error.
//
func (cp *statusConnectionPool) Next() (*Connection, error) {
	cp.Lock()
	defer cp.Unlock()

	// Prefer a live connection, chosen by the configured selector.
	if len(cp.live) > 0 {
		return cp.selector.Select(cp.live)
	}

	// No live connection is available: optimistically resurrect the last
	// entry of the dead list (the one with the fewest failures) and hand
	// it out.
	if n := len(cp.dead); n > 0 {
		conn := cp.dead[n-1]
		cp.dead = cp.dead[:n-1]

		conn.Lock()
		defer conn.Unlock()
		cp.resurrect(conn, false)

		return conn, nil
	}

	return nil, errors.New("no connection available")
}
// OnSuccess marks the connection as successful.
//
// A connection that is already live is left untouched; a dead one has its
// failure state cleared and is moved back to the live list.
func (cp *statusConnectionPool) OnSuccess(c *Connection) error {
	c.Lock()
	defer c.Unlock()

	// Nothing to do for a connection that is already healthy.
	if !c.IsDead {
		return nil
	}

	c.markAsHealthy()

	cp.Lock()
	defer cp.Unlock()

	return cp.resurrect(c, true)
}
// OnFailure marks the connection as failed.
//
// Locking order is pool first, then connection — the same order used by the
// resurrect timer callback in scheduleResurrect.
func (cp *statusConnectionPool) OnFailure(c *Connection) error {
	cp.Lock()
	defer cp.Unlock()

	c.Lock()

	if c.IsDead {
		if debugLogger != nil {
			debugLogger.Logf("Already removed %s\n", c.URL)
		}
		c.Unlock()
		return nil
	}

	if debugLogger != nil {
		debugLogger.Logf("Removing %s...\n", c.URL)
	}
	c.markAsDead()
	cp.scheduleResurrect(c)
	c.Unlock()

	// Push item to dead list and sort slice by number of failures
	// (descending, so the connection with the fewest failures sits at the
	// end and is the first candidate resurrected by Next).
	cp.dead = append(cp.dead, c)
	sort.Slice(cp.dead, func(i, j int) bool {
		c1 := cp.dead[i]
		c2 := cp.dead[j]
		c1.Lock()
		c2.Lock()
		defer c1.Unlock()
		defer c2.Unlock()

		res := c1.Failures > c2.Failures
		return res
	})

	// Check if connection exists in the list, return error if not.
	index := -1
	for i, conn := range cp.live {
		if conn == c {
			index = i
		}
	}
	if index < 0 {
		// NOTE(review): at this point c has already been appended to the
		// dead list above — the caller gets an error, but the connection
		// is still kept for resurrection.
		return errors.New("connection not in live list")
	}

	// Remove item; https://github.com/golang/go/wiki/SliceTricks
	copy(cp.live[index:], cp.live[index+1:])
	cp.live = cp.live[:len(cp.live)-1]

	return nil
}
// URLs returns the list of URLs of available connections.
//
func (cp *statusConnectionPool) URLs() []*url.URL {
	var urls []*url.URL

	cp.Lock()
	defer cp.Unlock()

	for _, conn := range cp.live {
		urls = append(urls, conn.URL)
	}

	return urls
}

// connections returns every tracked connection, live and dead, for metrics.
func (cp *statusConnectionPool) connections() []*Connection {
	var all []*Connection
	all = append(all, cp.live...)
	all = append(all, cp.dead...)
	return all
}
// resurrect adds the connection to the list of available connections.
// When removeDead is true, it also removes it from the dead list.
// The calling code is responsible for locking.
//
func (cp *statusConnectionPool) resurrect(c *Connection, removeDead bool) error {
	if debugLogger != nil {
		debugLogger.Logf("Resurrecting %s\n", c.URL)
	}

	c.markAsLive()
	cp.live = append(cp.live, c)

	if removeDead {
		// Find the connection in the dead list; it may legitimately be
		// absent (index stays -1), e.g. when Next already popped it.
		index := -1
		for i, conn := range cp.dead {
			if conn == c {
				index = i
			}
		}
		if index >= 0 {
			// Remove item; https://github.com/golang/go/wiki/SliceTricks
			copy(cp.dead[index:], cp.dead[index+1:])
			cp.dead = cp.dead[:len(cp.dead)-1]
		}
	}

	return nil
}
// scheduleResurrect schedules the connection to be resurrected.
//
// The timeout grows exponentially with the number of failures:
// initial * 2^min(failures-1, cutoff).
func (cp *statusConnectionPool) scheduleResurrect(c *Connection) {
	factor := math.Min(float64(c.Failures-1), float64(defaultResurrectTimeoutFactorCutoff))
	timeout := time.Duration(defaultResurrectTimeoutInitial.Seconds() * math.Exp2(factor) * float64(time.Second))

	if debugLogger != nil {
		debugLogger.Logf("Resurrect %s (failures=%d, factor=%1.1f, timeout=%s) in %s\n", c.URL, c.Failures, factor, timeout, c.DeadSince.Add(timeout).Sub(time.Now().UTC()).Truncate(time.Second))
	}

	time.AfterFunc(timeout, func() {
		// Lock pool first, then connection — same order as OnFailure.
		cp.Lock()
		defer cp.Unlock()

		c.Lock()
		defer c.Unlock()

		// Next or OnSuccess may have resurrected it in the meantime.
		if !c.IsDead {
			if debugLogger != nil {
				debugLogger.Logf("Already resurrected %s\n", c.URL)
			}
			return
		}

		cp.resurrect(c, true)
	})
}
// Select returns the connection in a round-robin fashion.
//
// It returns an error when the list of connections is empty, instead of
// panicking with an integer division by zero on the modulo below.
func (s *roundRobinSelector) Select(conns []*Connection) (*Connection, error) {
	if len(conns) == 0 {
		return nil, errors.New("no connection available for selection")
	}

	s.Lock()
	defer s.Unlock()

	s.curr = (s.curr + 1) % len(conns)
	return conns[s.curr], nil
}
// markAsDead marks the connection as dead.
//
// DeadSince is set only on the first failure, so it records when the
// connection originally went dead; Failures keeps incrementing on every call.
func (c *Connection) markAsDead() {
	c.IsDead = true
	if c.DeadSince.IsZero() {
		c.DeadSince = time.Now().UTC()
	}
	c.Failures++
}

// markAsLive marks the connection as alive.
//
// Unlike markAsHealthy, it preserves DeadSince and Failures, so the backoff
// keeps growing if the connection fails again.
func (c *Connection) markAsLive() {
	c.IsDead = false
}

// markAsHealthy marks the connection as healthy, resetting all failure state.
//
func (c *Connection) markAsHealthy() {
	c.IsDead = false
	c.DeadSince = time.Time{}
	c.Failures = 0
}
// String returns a readable connection representation.
//
func (c *Connection) String() string {
	c.Lock()
	defer c.Unlock()

	return fmt.Sprintf(
		"<%s> dead=%v failures=%d",
		c.URL, c.IsDead, c.Failures,
	)
}

View File

@@ -0,0 +1,221 @@
// SPDX-License-Identifier: Apache-2.0
//
// The OpenSearch Contributors require contributions made to
// this file be licensed under the Apache-2.0 license or a
// compatible open source license.
//
// Modifications Copyright OpenSearch Contributors. See
// GitHub history for details.
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package opensearchtransport
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"sort"
"strings"
"sync"
"time"
)
// Discoverable defines the interface for transports supporting node discovery.
//
type Discoverable interface {
	// DiscoverNodes fetches the cluster topology and reloads the connections.
	DiscoverNodes() error
}

// nodeInfo represents the information about node in a cluster.
//
// Only Roles and HTTP.PublishAddress are decoded from the _nodes/http
// response; ID and URL are filled in afterwards by getNodesInfo.
type nodeInfo struct {
	ID         string
	Name       string
	URL        *url.URL
	Roles      []string `json:"roles"`
	Attributes map[string]interface{}
	HTTP       struct {
		PublishAddress string `json:"publish_address"`
	}
}
// DiscoverNodes reloads the client connections by fetching information from the cluster.
//
// Master-only nodes are excluded from the resulting pool. The existing pool
// is swapped for a freshly built one under the client lock.
func (c *Client) DiscoverNodes() error {
	var conns []*Connection

	nodes, err := c.getNodesInfo()
	if err != nil {
		if debugLogger != nil {
			debugLogger.Logf("Error getting nodes info: %s\n", err)
		}
		return fmt.Errorf("discovery: get nodes: %s", err)
	}

	for _, node := range nodes {
		var (
			isMasterOnlyNode bool
		)

		// Copy the roles ([:0:0] forces a new backing array) before
		// sorting, so node.Roles keeps its original order for the log
		// line and the Connection below.
		roles := append(node.Roles[:0:0], node.Roles...)
		sort.Strings(roles)

		if len(roles) == 1 && roles[0] == "master" {
			isMasterOnlyNode = true
		}

		if debugLogger != nil {
			var skip string
			if isMasterOnlyNode {
				skip = "; [SKIP]"
			}
			debugLogger.Logf("Discovered node [%s]; %s; roles=%s%s\n", node.Name, node.URL, node.Roles, skip)
		}

		// Skip master only nodes
		// TODO: Move logic to Selector?
		if isMasterOnlyNode {
			continue
		}

		conns = append(conns, &Connection{
			URL:        node.URL,
			ID:         node.ID,
			Name:       node.Name,
			Roles:      node.Roles,
			Attributes: node.Attributes,
		})
	}

	c.Lock()
	defer c.Unlock()

	// Also lock the pool being swapped out, when it supports locking.
	if lockable, ok := c.pool.(sync.Locker); ok {
		lockable.Lock()
		defer lockable.Unlock()
	}

	if c.poolFunc != nil {
		c.pool = c.poolFunc(conns, c.selector)
	} else {
		// TODO: Replace only live connections, leave dead scheduled for resurrect?
		c.pool, err = NewConnectionPool(conns, c.selector)
		if err != nil {
			return err
		}
	}

	return nil
}
// getNodesInfo fetches the node list from the _nodes/http API over one
// connection from the current pool, and returns a nodeInfo per node with its
// ID and URL populated.
func (c *Client) getNodesInfo() ([]nodeInfo, error) {
	var (
		out []nodeInfo
		// Reuse the scheme of the first configured URL for every node.
		scheme = c.urls[0].Scheme
	)

	req, err := http.NewRequest("GET", "/_nodes/http", nil)
	if err != nil {
		return out, err
	}

	c.Lock()
	conn, err := c.pool.Next()
	c.Unlock()
	// TODO: If no connection is returned, fallback to original URLs
	if err != nil {
		return out, err
	}

	c.setReqURL(conn.URL, req)
	c.setReqAuth(conn.URL, req)
	c.setReqUserAgent(req)

	res, err := c.transport.RoundTrip(req)
	if err != nil {
		return out, err
	}

	defer res.Body.Close()

	// NOTE(review): any status above 200 (including other 2xx) is treated
	// as a server error; _nodes/http is expected to answer with 200.
	if res.StatusCode > 200 {
		body, _ := ioutil.ReadAll(res.Body)
		return out, fmt.Errorf("server error: %s: %s", res.Status, body)
	}

	// Response envelope shape: {"nodes": {"<id>": {...}, ...}}.
	var env map[string]json.RawMessage
	if err := json.NewDecoder(res.Body).Decode(&env); err != nil {
		return out, err
	}

	var nodes map[string]nodeInfo
	if err := json.Unmarshal(env["nodes"], &nodes); err != nil {
		return out, err
	}

	for id, node := range nodes {
		node.ID = id

		u, err := c.getNodeURL(node, scheme)
		if err != nil {
			return out, err
		}
		node.URL = u

		out = append(out, node)
	}

	return out, nil
}
// getNodeURL builds the URL for a node from its publish_address, which is
// reported either as "ip:port" or as "hostname/ip:port".
//
// NOTE(review): the parsing assumes IPv4 or hostname addresses; an IPv6
// publish_address would need bracket-aware splitting — confirm upstream.
func (c *Client) getNodeURL(node nodeInfo, scheme string) (*url.URL, error) {
	addr := node.HTTP.PublishAddress

	var host string
	if parts := strings.Split(addr, "/"); len(parts) > 1 {
		// "hostname/ip:port" — prefer the hostname part.
		host = parts[0]
	} else {
		host = strings.Split(parts[0], ":")[0]
	}

	// The port is whatever follows the last colon.
	segments := strings.Split(addr, ":")
	port := segments[len(segments)-1]

	return &url.URL{Scheme: scheme, Host: host + ":" + port}, nil
}
// scheduleDiscoverNodes runs a discovery in the background and re-arms the
// timer to fire again after the configured interval.
//
// The goroutine's error is intentionally dropped here; DiscoverNodes already
// logs failures via debugLogger.
//
// NOTE(review): the d parameter is unused — the re-arm below always uses
// c.discoverNodesInterval.
func (c *Client) scheduleDiscoverNodes(d time.Duration) {
	go c.DiscoverNodes()

	c.Lock()
	defer c.Unlock()

	// Stop any previously scheduled run before arming a new one.
	if c.discoverNodesTimer != nil {
		c.discoverNodesTimer.Stop()
	}
	c.discoverNodesTimer = time.AfterFunc(c.discoverNodesInterval, func() {
		c.scheduleDiscoverNodes(c.discoverNodesInterval)
	})
}

View File

@@ -0,0 +1,59 @@
// SPDX-License-Identifier: Apache-2.0
//
// The OpenSearch Contributors require contributions made to
// this file be licensed under the Apache-2.0 license or a
// compatible open source license.
//
// Modifications Copyright OpenSearch Contributors. See
// GitHub history for details.
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/*
Package opensearchtransport provides the transport layer for the OpenSearch client.
It is automatically included in the client provided by the github.com/opensearch-project/opensearch-go package
and is not intended for direct use: to configure the client, use the opensearch.Config struct.
The default HTTP transport of the client is http.Transport; use the Transport option to customize it;
The package will automatically retry requests on network-related errors, and on specific
response status codes (by default 502, 503, 504). Use the RetryOnStatus option to customize the list.
The transport will not retry a timeout network error, unless enabled by setting EnableRetryOnTimeout to true.
Use the MaxRetries option to configure the number of retries, and set DisableRetry to true
to disable the retry behaviour altogether.
By default, retries are performed without any delay; to configure a backoff interval,
implement the RetryBackoff option function; see an example in the package unit tests.
When multiple addresses are passed in configuration, the package will use them in a round-robin fashion,
and will keep track of live and dead nodes. The status of dead nodes is checked periodically.
To customize the node selection behaviour, provide a Selector implementation in the configuration.
To replace the connection pool entirely, provide a custom ConnectionPool implementation via
the ConnectionPoolFunc option.
The package defines the Logger interface for logging information about request and response.
It comes with several bundled loggers for logging in text and JSON.
Use the EnableDebugLogger option to enable the debugging logger for connection management.
Use the EnableMetrics option to enable metric collection and export.
*/
package opensearchtransport

View File

@@ -0,0 +1,461 @@
// SPDX-License-Identifier: Apache-2.0
//
// The OpenSearch Contributors require contributions made to
// this file be licensed under the Apache-2.0 license or a
// compatible open source license.
//
// Modifications Copyright OpenSearch Contributors. See
// GitHub history for details.
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package opensearchtransport
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// debugLogger is the package-level logger for connection-management debug
// output; it stays nil unless enabled via the client configuration.
var debugLogger DebuggingLogger

// Logger defines an interface for logging request and response.
//
type Logger interface {
	// LogRoundTrip should not modify the request or response, except for consuming and closing the body.
	// Implementations have to check for nil values in request and response.
	LogRoundTrip(*http.Request, *http.Response, error, time.Time, time.Duration) error
	// RequestBodyEnabled makes the client pass a copy of request body to the logger.
	RequestBodyEnabled() bool
	// ResponseBodyEnabled makes the client pass a copy of response body to the logger.
	ResponseBodyEnabled() bool
}

// DebuggingLogger defines the interface for a debugging logger.
//
type DebuggingLogger interface {
	// Log prints the arguments in the default format.
	Log(a ...interface{}) error
	// Logf formats according to format and prints the result.
	Logf(format string, a ...interface{}) error
}
// TextLogger prints the log message in plain text.
//
type TextLogger struct {
	Output             io.Writer // Destination for the log output.
	EnableRequestBody  bool      // Whether to log request bodies.
	EnableResponseBody bool      // Whether to log response bodies.
}

// ColorLogger prints the log message in a terminal-optimized plain text.
//
type ColorLogger struct {
	Output             io.Writer
	EnableRequestBody  bool
	EnableResponseBody bool
}

// CurlLogger prints the log message as a runnable curl command.
//
type CurlLogger struct {
	Output             io.Writer
	EnableRequestBody  bool
	EnableResponseBody bool
}

// JSONLogger prints the log message as JSON.
//
type JSONLogger struct {
	Output             io.Writer
	EnableRequestBody  bool
	EnableResponseBody bool
}

// debuggingLogger prints debug messages as plain text.
//
type debuggingLogger struct {
	Output io.Writer
}
// LogRoundTrip prints the information about request and response.
//
// NOTE(review): req is dereferenced on the first Fprintf before the nil
// check below — the Logger contract says implementations must check for nil
// request/response, so a nil req would panic here; confirm callers never
// pass one.
func (l *TextLogger) LogRoundTrip(req *http.Request, res *http.Response, err error, start time.Time, dur time.Duration) error {
	fmt.Fprintf(l.Output, "%s %s %s [status:%d request:%s]\n",
		start.Format(time.RFC3339),
		req.Method,
		req.URL.String(),
		resStatusCode(res),
		dur.Truncate(time.Millisecond),
	)

	if l.RequestBodyEnabled() && req != nil && req.Body != nil && req.Body != http.NoBody {
		var buf bytes.Buffer
		if req.GetBody != nil {
			// GetBody yields a fresh copy, leaving req.Body untouched.
			b, _ := req.GetBody()
			buf.ReadFrom(b)
		} else {
			buf.ReadFrom(req.Body)
		}
		logBodyAsText(l.Output, &buf, ">")
	}

	if l.ResponseBodyEnabled() && res != nil && res.Body != nil && res.Body != http.NoBody {
		defer res.Body.Close()
		var buf bytes.Buffer
		buf.ReadFrom(res.Body)
		logBodyAsText(l.Output, &buf, "<")
	}

	if err != nil {
		fmt.Fprintf(l.Output, "! ERROR: %v\n", err)
	}

	return nil
}
// RequestBodyEnabled returns true when the request body should be logged.
func (l *TextLogger) RequestBodyEnabled() bool {
	return l.EnableRequestBody
}

// ResponseBodyEnabled returns true when the response body should be logged.
func (l *TextLogger) ResponseBodyEnabled() bool {
	return l.EnableResponseBody
}
// LogRoundTrip prints the information about request and response.
//
// Fix: the original read res.Status unconditionally, panicking with a nil
// dereference when the round trip failed and res is nil — the Logger
// contract requires implementations to check for nil values.
func (l *ColorLogger) LogRoundTrip(req *http.Request, res *http.Response, err error, start time.Time, dur time.Duration) error {
	query, _ := url.QueryUnescape(req.URL.RawQuery)
	if query != "" {
		query = "?" + query
	}

	// Pick status text and ANSI color; a nil response (transport error)
	// is rendered like an unknown status.
	var (
		status string
		color  string
	)
	switch {
	case res == nil:
		status = "ERROR"
		color = "\x1b[31;4m"
	case res.StatusCode > 0 && res.StatusCode < 300:
		status = res.Status
		color = "\x1b[32m"
	case res.StatusCode > 299 && res.StatusCode < 500:
		status = res.Status
		color = "\x1b[33m"
	case res.StatusCode > 499:
		status = res.Status
		color = "\x1b[31m"
	default:
		status = "ERROR"
		color = "\x1b[31;4m"
	}

	fmt.Fprintf(l.Output, "%6s \x1b[1;4m%s://%s%s\x1b[0m%s %s%s\x1b[0m \x1b[2m%s\x1b[0m\n",
		req.Method,
		req.URL.Scheme,
		req.URL.Host,
		req.URL.Path,
		query,
		color,
		status,
		dur.Truncate(time.Millisecond),
	)

	if l.RequestBodyEnabled() && req != nil && req.Body != nil && req.Body != http.NoBody {
		var buf bytes.Buffer
		if req.GetBody != nil {
			b, _ := req.GetBody()
			buf.ReadFrom(b)
		} else {
			buf.ReadFrom(req.Body)
		}
		fmt.Fprint(l.Output, "\x1b[2m")
		logBodyAsText(l.Output, &buf, " »")
		fmt.Fprint(l.Output, "\x1b[0m")
	}

	if l.ResponseBodyEnabled() && res != nil && res.Body != nil && res.Body != http.NoBody {
		defer res.Body.Close()
		var buf bytes.Buffer
		buf.ReadFrom(res.Body)
		fmt.Fprint(l.Output, "\x1b[2m")
		logBodyAsText(l.Output, &buf, " «")
		fmt.Fprint(l.Output, "\x1b[0m")
	}

	if err != nil {
		fmt.Fprintf(l.Output, "\x1b[31;1m» ERROR \x1b[31m%v\x1b[0m\n", err)
	}

	if l.RequestBodyEnabled() || l.ResponseBodyEnabled() {
		fmt.Fprintf(l.Output, "\x1b[2m%s\x1b[0m\n", strings.Repeat("─", 80))
	}

	return nil
}
// RequestBodyEnabled returns true when the request body should be logged.
func (l *ColorLogger) RequestBodyEnabled() bool {
	return l.EnableRequestBody
}

// ResponseBodyEnabled returns true when the response body should be logged.
func (l *ColorLogger) ResponseBodyEnabled() bool {
	return l.EnableResponseBody
}
// LogRoundTrip prints the information about request and response.
//
// Fix: the original read res.Status unconditionally and panicked when the
// round trip failed with res == nil; the Logger contract requires nil checks.
func (l *CurlLogger) LogRoundTrip(req *http.Request, res *http.Response, err error, start time.Time, dur time.Duration) error {
	var b bytes.Buffer

	var query string
	qvalues := url.Values{}
	for k, v := range req.URL.Query() {
		if k == "pretty" {
			// "pretty" is re-added unconditionally below.
			continue
		}
		for _, qv := range v {
			qvalues.Add(k, qv)
		}
	}
	if len(qvalues) > 0 {
		query = qvalues.Encode()
	}

	b.WriteString(`curl`)
	if req.Method == "HEAD" {
		b.WriteString(" --head")
	} else {
		fmt.Fprintf(&b, " -X %s", req.Method)
	}

	if len(req.Header) > 0 {
		for k, vv := range req.Header {
			// Skip credentials and client-identification headers.
			if k == "Authorization" || k == "User-Agent" {
				continue
			}
			v := strings.Join(vv, ",")
			b.WriteString(fmt.Sprintf(" -H '%s: %s'", k, v))
		}
	}

	// The command always targets localhost with ?pretty, so it can be
	// copy-pasted against a local cluster.
	b.WriteString(" 'http://localhost:9200")
	b.WriteString(req.URL.Path)
	b.WriteString("?pretty")
	if query != "" {
		fmt.Fprintf(&b, "&%s", query)
	}
	b.WriteString("'")

	if req != nil && req.Body != nil && req.Body != http.NoBody {
		var buf bytes.Buffer
		if req.GetBody != nil {
			b, _ := req.GetBody()
			buf.ReadFrom(b)
		} else {
			buf.ReadFrom(req.Body)
		}

		b.Grow(buf.Len())
		b.WriteString(" -d \\\n'")
		json.Indent(&b, buf.Bytes(), "", " ")
		b.WriteString("'")
	}

	b.WriteRune('\n')

	// Guard against a nil response (failed round trip).
	status := "ERROR"
	if res != nil {
		status = res.Status
	}
	fmt.Fprintf(&b, "# => %s [%s] %s\n", start.UTC().Format(time.RFC3339), status, dur.Truncate(time.Millisecond))

	if l.ResponseBodyEnabled() && res != nil && res.Body != nil && res.Body != http.NoBody {
		var buf bytes.Buffer
		buf.ReadFrom(res.Body)

		b.Grow(buf.Len())
		b.WriteString("# ")
		json.Indent(&b, buf.Bytes(), "# ", " ")
	}

	b.WriteString("\n")
	if l.ResponseBodyEnabled() && res != nil && res.Body != nil && res.Body != http.NoBody {
		b.WriteString("\n")
	}

	b.WriteTo(l.Output)

	return nil
}
// RequestBodyEnabled returns true when the request body should be logged.
func (l *CurlLogger) RequestBodyEnabled() bool {
	return l.EnableRequestBody
}

// ResponseBodyEnabled returns true when the response body should be logged.
func (l *CurlLogger) ResponseBodyEnabled() bool {
	return l.EnableResponseBody
}
// LogRoundTrip prints the information about request and response.
//
// The JSON document is assembled by hand into a bytes.Buffer via small
// append-helpers over a reusable scratch slice, rather than encoding/json,
// avoiding reflection on the per-request path.
func (l *JSONLogger) LogRoundTrip(req *http.Request, res *http.Response, err error, start time.Time, dur time.Duration) error {
	// TODO: Research performance optimization of using sync.Pool

	bsize := 200
	var b = bytes.NewBuffer(make([]byte, 0, bsize))
	var v = make([]byte, 0, bsize)

	// Each helper resets the scratch slice v, formats into it, and writes
	// the bytes into the output buffer.
	appendTime := func(t time.Time) {
		v = v[:0]
		v = t.AppendFormat(v, time.RFC3339)
		b.Write(v)
	}

	appendQuote := func(s string) {
		v = v[:0]
		v = strconv.AppendQuote(v, s)
		b.Write(v)
	}

	appendInt := func(i int64) {
		v = v[:0]
		v = strconv.AppendInt(v, i, 10)
		b.Write(v)
	}

	port := req.URL.Port()

	b.WriteRune('{')

	// -- Timestamp
	b.WriteString(`"@timestamp":"`)
	appendTime(start.UTC())
	b.WriteRune('"')

	// -- Event
	b.WriteString(`,"event":{`)
	b.WriteString(`"duration":`)
	appendInt(dur.Nanoseconds())
	b.WriteRune('}')

	// -- URL
	b.WriteString(`,"url":{`)
	b.WriteString(`"scheme":`)
	appendQuote(req.URL.Scheme)
	b.WriteString(`,"domain":`)
	appendQuote(req.URL.Hostname())
	if port != "" {
		// Port is numeric, so it is written unquoted.
		b.WriteString(`,"port":`)
		b.WriteString(port)
	}
	b.WriteString(`,"path":`)
	appendQuote(req.URL.Path)
	b.WriteString(`,"query":`)
	appendQuote(req.URL.RawQuery)
	b.WriteRune('}') // Close "url"

	// -- HTTP
	b.WriteString(`,"http":`)

	// ---- Request
	b.WriteString(`{"request":{`)
	b.WriteString(`"method":`)
	appendQuote(req.Method)
	if l.RequestBodyEnabled() && req != nil && req.Body != nil && req.Body != http.NoBody {
		var buf bytes.Buffer
		if req.GetBody != nil {
			// This b shadows the outer buffer inside this branch only.
			b, _ := req.GetBody()
			buf.ReadFrom(b)
		} else {
			buf.ReadFrom(req.Body)
		}

		b.Grow(buf.Len() + 8)
		b.WriteString(`,"body":`)
		appendQuote(buf.String())
	}
	b.WriteRune('}') // Close "http.request"

	// ---- Response
	b.WriteString(`,"response":{`)
	b.WriteString(`"status_code":`)
	appendInt(int64(resStatusCode(res)))

	if l.ResponseBodyEnabled() && res != nil && res.Body != nil && res.Body != http.NoBody {
		defer res.Body.Close()
		var buf bytes.Buffer
		buf.ReadFrom(res.Body)

		b.Grow(buf.Len() + 8)
		b.WriteString(`,"body":`)
		appendQuote(buf.String())
	}
	b.WriteRune('}') // Close "http.response"
	b.WriteRune('}') // Close "http"

	// -- Error
	if err != nil {
		b.WriteString(`,"error":{"message":`)
		appendQuote(err.Error())
		b.WriteRune('}') // Close "error"
	}

	b.WriteRune('}')
	b.WriteRune('\n')
	b.WriteTo(l.Output)

	return nil
}
// RequestBodyEnabled returns true when the request body should be logged.
func (l *JSONLogger) RequestBodyEnabled() bool {
	return l.EnableRequestBody
}

// ResponseBodyEnabled returns true when the response body should be logged.
func (l *JSONLogger) ResponseBodyEnabled() bool {
	return l.EnableResponseBody
}
// Log prints the arguments to output in default format.
//
func (l *debuggingLogger) Log(a ...interface{}) error {
	if _, err := fmt.Fprint(l.Output, a...); err != nil {
		return err
	}
	return nil
}

// Logf formats the arguments and prints them to output.
//
func (l *debuggingLogger) Logf(format string, a ...interface{}) error {
	if _, err := fmt.Fprintf(l.Output, format, a...); err != nil {
		return err
	}
	return nil
}
func logBodyAsText(dst io.Writer, body io.Reader, prefix string) {
scanner := bufio.NewScanner(body)
for scanner.Scan() {
s := scanner.Text()
if s != "" {
fmt.Fprintf(dst, "%s %s\n", prefix, s)
}
}
}
// duplicateBody reads body fully, closes it, and returns two independent
// readers over the same bytes. On a read error, both returned readers replay
// the prefix consumed so far and then surface the same error via errorReader.
func duplicateBody(body io.ReadCloser) (io.ReadCloser, io.ReadCloser, error) {
	var first, second bytes.Buffer

	// Everything read from body while filling first is teed into second.
	if _, err := first.ReadFrom(io.TeeReader(body, &second)); err != nil {
		return ioutil.NopCloser(io.MultiReader(&first, errorReader{err: err})),
			ioutil.NopCloser(io.MultiReader(&second, errorReader{err: err})),
			err
	}
	defer func() { body.Close() }()

	return ioutil.NopCloser(&first), ioutil.NopCloser(&second), nil
}
func resStatusCode(res *http.Response) int {
if res == nil {
return -1
}
return res.StatusCode
}
// errorReader is an io.Reader that always fails with the wrapped error; it is
// appended after a partially-read body by duplicateBody so consumers observe
// the original read error.
type errorReader struct{ err error }

// Read returns no data and always yields the wrapped error.
func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }

View File

@@ -0,0 +1,203 @@
// SPDX-License-Identifier: Apache-2.0
//
// The OpenSearch Contributors require contributions made to
// this file be licensed under the Apache-2.0 license or a
// compatible open source license.
//
// Modifications Copyright OpenSearch Contributors. See
// GitHub history for details.
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package opensearchtransport
import (
"errors"
"fmt"
"strconv"
"strings"
"sync"
"time"
)
// Measurable defines the interface for transports supporting metrics.
//
type Measurable interface {
	Metrics() (Metrics, error)
}

// connectionable defines the interface for transports returning a list of connections.
//
type connectionable interface {
	connections() []*Connection
}

// Metrics represents the transport metrics.
//
type Metrics struct {
	Requests    int            `json:"requests"`  // Total requests counted.
	Failures    int            `json:"failures"`  // Total failures counted.
	Responses   map[int]int    `json:"responses"` // Response counts keyed by status code.
	Connections []fmt.Stringer `json:"connections"`
}

// ConnectionMetric represents metric information for a connection.
//
type ConnectionMetric struct {
	URL       string     `json:"url"`
	Failures  int        `json:"failures,omitempty"`
	IsDead    bool       `json:"dead,omitempty"`
	DeadSince *time.Time `json:"dead_since,omitempty"`

	Meta struct {
		ID    string   `json:"id"`
		Name  string   `json:"name"`
		Roles []string `json:"roles"`
	} `json:"meta"`
}

// metrics represents the inner state of metrics.
//
// Access is guarded by the embedded RWMutex.
type metrics struct {
	sync.RWMutex

	requests  int
	failures  int
	responses map[int]int

	connections []*Connection
}
// Metrics returns the transport metrics.
//
// It snapshots the counters under the metrics read-lock and, when the pool
// supports locking, holds the pool lock while walking its connections.
func (c *Client) Metrics() (Metrics, error) {
	if c.metrics == nil {
		return Metrics{}, errors.New("transport metrics not enabled")
	}

	c.metrics.RLock()
	defer c.metrics.RUnlock()

	if lockable, ok := c.pool.(sync.Locker); ok {
		lockable.Lock()
		defer lockable.Unlock()
	}

	// NOTE(review): Responses shares the live map with c.metrics — a caller
	// reading it concurrently with in-flight requests would race; confirm
	// whether a copy is intended here.
	m := Metrics{
		Requests:  c.metrics.requests,
		Failures:  c.metrics.failures,
		Responses: c.metrics.responses,
	}

	if pool, ok := c.pool.(connectionable); ok {
		// Note: the loop variable c shadows the Client receiver below.
		for _, c := range pool.connections() {
			c.Lock()

			cm := ConnectionMetric{
				URL:      c.URL.String(),
				IsDead:   c.IsDead,
				Failures: c.Failures,
			}

			if !c.DeadSince.IsZero() {
				cm.DeadSince = &c.DeadSince
			}

			if c.ID != "" {
				cm.Meta.ID = c.ID
			}
			if c.Name != "" {
				cm.Meta.Name = c.Name
			}
			if len(c.Roles) > 0 {
				cm.Meta.Roles = c.Roles
			}

			m.Connections = append(m.Connections, cm)
			c.Unlock()
		}
	}

	return m, nil
}
// String returns the metrics as a string.
//
// Fix: the original incremented the Connections range variable i inside its
// loop — a no-op, since range reassigns i each iteration — and reused a
// single outer counter across both loops; each loop now manages its own
// counter. Output is unchanged.
func (m Metrics) String() string {
	var b strings.Builder

	b.WriteString("{")

	b.WriteString("Requests:")
	b.WriteString(strconv.Itoa(m.Requests))

	b.WriteString(" Failures:")
	b.WriteString(strconv.Itoa(m.Failures))

	if len(m.Responses) > 0 {
		b.WriteString(" Responses: ")
		b.WriteString("[")

		// Map iteration order is unspecified; the counter only drives
		// comma placement.
		i := 0
		for code, num := range m.Responses {
			b.WriteString(strconv.Itoa(code))
			b.WriteString(":")
			b.WriteString(strconv.Itoa(num))
			if i+1 < len(m.Responses) {
				b.WriteString(", ")
			}
			i++
		}
		b.WriteString("]")
	}

	b.WriteString(" Connections: [")
	for i, c := range m.Connections {
		b.WriteString(c.String())
		if i+1 < len(m.Connections) {
			b.WriteString(", ")
		}
	}
	b.WriteString("]")

	b.WriteString("}")

	return b.String()
}
// String returns the connection information as a string.
//
// Only non-zero fields are included, so a healthy connection renders as just
// "{<url>}".
func (cm ConnectionMetric) String() string {
	var b strings.Builder
	b.WriteString("{")
	b.WriteString(cm.URL)
	if cm.IsDead {
		fmt.Fprintf(&b, " dead=%v", cm.IsDead)
	}
	if cm.Failures > 0 {
		fmt.Fprintf(&b, " failures=%d", cm.Failures)
	}
	if cm.DeadSince != nil {
		fmt.Fprintf(&b, " dead_since=%s", cm.DeadSince.Local().Format(time.Stamp))
	}
	b.WriteString("}")
	return b.String()
}

View File

@@ -0,0 +1,521 @@
// SPDX-License-Identifier: Apache-2.0
//
// The OpenSearch Contributors require contributions made to
// this file be licensed under the Apache-2.0 license or a
// compatible open source license.
//
// Modifications Copyright OpenSearch Contributors. See
// GitHub history for details.
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package opensearchtransport
import (
	"bytes"
	"compress/gzip"
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"os"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/opensearch-project/opensearch-go/internal/version"
	"github.com/opensearch-project/opensearch-go/signer"
)
const (
	// Version returns the package version as a string.
	Version = version.Client
	// esCompatHeader defines the env var for Compatibility header.
	esCompatHeader = "ELASTIC_CLIENT_APIVERSIONING"
)

var (
	// userAgent is computed once in init and sent with every request.
	userAgent string
	// compatibilityHeader is read from the esCompatHeader env var in init;
	// when true, Perform sends Elasticsearch 7.x compatibility content types.
	compatibilityHeader bool
	// reGoVersion extracts the numeric version from runtime.Version(),
	// e.g. "go1.18.5" -> "1.18.5".
	reGoVersion = regexp.MustCompile(`go(\d+\.\d+\..+)`)

	// defaultMaxRetries is the retry count used when Config.MaxRetries is zero.
	defaultMaxRetries = 3
	// defaultRetryOnStatus are the response codes retried by default.
	defaultRetryOnStatus = [...]int{502, 503, 504}
)
// init precomputes the User-Agent string and reads the
// ELASTIC_CLIENT_APIVERSIONING environment variable to decide whether
// Elasticsearch compatibility headers should be sent with requests.
func init() {
	userAgent = initUserAgent()
	// An unset or unparsable env value leaves the flag false.
	compatibilityHeader, _ = strconv.ParseBool(os.Getenv(esCompatHeader))
}
// Interface defines the interface for HTTP client.
//
type Interface interface {
	// Perform executes an HTTP request and returns the response or an error.
	Perform(*http.Request) (*http.Response, error)
}
// Config represents the configuration of HTTP client.
//
type Config struct {
	URLs     []*url.URL // Cluster endpoint URLs; one connection is created per URL.
	Username string     // Username for HTTP Basic Authentication.
	Password string     // Password for HTTP Basic Authentication.

	Header http.Header // Global request header, added to every request that does not already set the key.
	CACert []byte      // PEM-encoded CA certificates; requires Transport to be a *http.Transport.

	Signer signer.Signer // Optional signer applied to every request before it is sent.

	RetryOnStatus        []int // Response status codes that trigger a retry (default: 502, 503, 504).
	DisableRetry         bool  // Disable retrying on the configured status codes and network errors.
	EnableRetryOnTimeout bool  // Also retry on timeout network errors.
	MaxRetries           int   // Maximum number of retries (default: 3).

	RetryBackoff func(attempt int) time.Duration // Optional delay before each retry; attempt starts at 1.

	CompressRequestBody bool // Gzip-compress request bodies and set Content-Encoding accordingly.

	EnableMetrics     bool // Collect request/failure/response metrics.
	EnableDebugLogger bool // Enable the debug logger writing to stdout.

	DiscoverNodesInterval time.Duration // Interval for periodic node discovery; zero disables it.

	Transport http.RoundTripper // HTTP transport (default: http.DefaultTransport).
	Logger    Logger            // Optional logger receiving each round trip.
	Selector  Selector          // Optional connection selector passed to the pool.

	ConnectionPoolFunc func([]*Connection, Selector) ConnectionPool // Optional custom connection pool constructor.
}
// Client represents the HTTP client.
//
// The embedded mutex guards access to the connection pool
// (see the Lock/Unlock pairs around pool calls in Perform).
type Client struct {
	sync.Mutex

	urls     []*url.URL
	username string
	password string
	header   http.Header
	signer   signer.Signer

	// Retry configuration; see the corresponding Config fields.
	retryOnStatus        []int
	disableRetry         bool
	enableRetryOnTimeout bool
	maxRetries           int
	retryBackoff         func(attempt int) time.Duration

	// Node discovery scheduling state.
	discoverNodesInterval time.Duration
	discoverNodesTimer    *time.Timer

	compressRequestBody bool

	// metrics is non-nil only when Config.EnableMetrics is set.
	metrics *metrics

	transport http.RoundTripper
	logger    Logger
	selector  Selector
	pool      ConnectionPool
	poolFunc  func([]*Connection, Selector) ConnectionPool
}
// New creates new transport client.
//
// http.DefaultTransport will be used if no transport is passed in the configuration.
//
func New(cfg Config) (*Client, error) {
	if cfg.Transport == nil {
		cfg.Transport = http.DefaultTransport
	}
	// Install the provided CA certificates. This requires the concrete
	// *http.Transport type, because the TLS configuration must be mutated.
	if cfg.CACert != nil {
		httpTransport, ok := cfg.Transport.(*http.Transport)
		if !ok {
			return nil, fmt.Errorf("unable to set CA certificate for transport of type %T", cfg.Transport)
		}
		httpTransport = httpTransport.Clone()
		// Clone preserves a nil TLSClientConfig (as on http.DefaultTransport);
		// initialize it to avoid a nil pointer dereference below.
		if httpTransport.TLSClientConfig == nil {
			httpTransport.TLSClientConfig = &tls.Config{}
		}
		httpTransport.TLSClientConfig.RootCAs = x509.NewCertPool()
		if ok := httpTransport.TLSClientConfig.RootCAs.AppendCertsFromPEM(cfg.CACert); !ok {
			return nil, errors.New("unable to add CA certificate")
		}
		cfg.Transport = httpTransport
	}
	// Apply defaults for the retry configuration.
	if len(cfg.RetryOnStatus) == 0 {
		cfg.RetryOnStatus = defaultRetryOnStatus[:]
	}
	if cfg.MaxRetries == 0 {
		cfg.MaxRetries = defaultMaxRetries
	}
	// One connection per configured URL.
	var conns []*Connection
	for _, u := range cfg.URLs {
		conns = append(conns, &Connection{URL: u})
	}
	client := Client{
		urls:                  cfg.URLs,
		username:              cfg.Username,
		password:              cfg.Password,
		header:                cfg.Header,
		signer:                cfg.Signer,
		retryOnStatus:         cfg.RetryOnStatus,
		disableRetry:          cfg.DisableRetry,
		enableRetryOnTimeout:  cfg.EnableRetryOnTimeout,
		maxRetries:            cfg.MaxRetries,
		retryBackoff:          cfg.RetryBackoff,
		discoverNodesInterval: cfg.DiscoverNodesInterval,
		compressRequestBody:   cfg.CompressRequestBody,
		transport:             cfg.Transport,
		logger:                cfg.Logger,
		selector:              cfg.Selector,
		poolFunc:              cfg.ConnectionPoolFunc,
	}
	// Build the connection pool, preferring a user-supplied constructor.
	if client.poolFunc != nil {
		client.pool = client.poolFunc(conns, client.selector)
	} else {
		client.pool, _ = NewConnectionPool(conns, client.selector)
	}
	if cfg.EnableDebugLogger {
		debugLogger = &debuggingLogger{Output: os.Stdout}
	}
	if cfg.EnableMetrics {
		client.metrics = &metrics{responses: make(map[int]int)}
		// TODO(karmi): Type assertion to interface
		if pool, ok := client.pool.(*singleConnectionPool); ok {
			pool.metrics = client.metrics
		}
		if pool, ok := client.pool.(*statusConnectionPool); ok {
			pool.metrics = client.metrics
		}
	}
	// Kick off periodic node discovery, when configured.
	if client.discoverNodesInterval > 0 {
		time.AfterFunc(client.discoverNodesInterval, func() {
			client.scheduleDiscoverNodes(client.discoverNodesInterval)
		})
	}
	return &client, nil
}
// Perform executes the request and returns a response or error.
//
// The request is attempted up to maxRetries+1 times. It is retried on EOF
// errors, on network errors (timeouts only when enableRetryOnTimeout is set),
// and on the configured response status codes, unless retries are disabled.
//
func (c *Client) Perform(req *http.Request) (*http.Response, error) {
	var (
		res *http.Response
		err error
	)
	// Compatibility Header
	if compatibilityHeader {
		if req.Body != nil {
			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=7")
		}
		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=7")
	}
	// Record metrics, when enabled
	if c.metrics != nil {
		c.metrics.Lock()
		c.metrics.requests++
		c.metrics.Unlock()
	}
	// Update request
	c.setReqUserAgent(req)
	c.setReqGlobalHeader(req)
	if req.Body != nil && req.Body != http.NoBody {
		if c.compressRequestBody {
			var buf bytes.Buffer
			zw := gzip.NewWriter(&buf)
			if _, err := io.Copy(zw, req.Body); err != nil {
				return nil, fmt.Errorf("failed to compress request body: %s", err)
			}
			if err := zw.Close(); err != nil {
				return nil, fmt.Errorf("failed to compress request body (during close): %s", err)
			}
			// GetBody serves a fresh reader over a copy of the compressed
			// buffer so the body can be replayed on retries and for logging.
			req.GetBody = func() (io.ReadCloser, error) {
				r := buf
				return ioutil.NopCloser(&r), nil
			}
			req.Body, _ = req.GetBody()
			req.Header.Set("Content-Encoding", "gzip")
			req.ContentLength = int64(buf.Len())
		} else if req.GetBody == nil {
			// Buffer the body only when it may need to be re-read: for
			// retries, or for request-body logging.
			if !c.disableRetry || (c.logger != nil && c.logger.RequestBodyEnabled()) {
				var buf bytes.Buffer
				buf.ReadFrom(req.Body)
				req.GetBody = func() (io.ReadCloser, error) {
					r := buf
					return ioutil.NopCloser(&r), nil
				}
				req.Body, _ = req.GetBody()
			}
		}
	}
	for i := 0; i <= c.maxRetries; i++ {
		var (
			conn            *Connection
			shouldRetry     bool
			shouldCloseBody bool
		)
		// Get connection from the pool
		c.Lock()
		conn, err = c.pool.Next()
		c.Unlock()
		if err != nil {
			if c.logger != nil {
				c.logRoundTrip(req, nil, err, time.Time{}, time.Duration(0))
			}
			return nil, fmt.Errorf("cannot get connection: %s", err)
		}
		// Update request
		c.setReqURL(conn.URL, req)
		c.setReqAuth(conn.URL, req)
		if err = c.signRequest(req); err != nil {
			return nil, fmt.Errorf("failed to sign request: %s", err)
		}
		// On retry attempts, re-arm the body from GetBody, since the
		// previous attempt consumed it.
		if !c.disableRetry && i > 0 && req.Body != nil && req.Body != http.NoBody {
			body, err := req.GetBody()
			if err != nil {
				return nil, fmt.Errorf("cannot get request body: %s", err)
			}
			req.Body = body
		}
		// Set up time measures and execute the request
		start := time.Now().UTC()
		res, err = c.transport.RoundTrip(req)
		dur := time.Since(start)
		// Log request and response
		if c.logger != nil {
			if c.logger.RequestBodyEnabled() && req.Body != nil && req.Body != http.NoBody {
				req.Body, _ = req.GetBody()
			}
			c.logRoundTrip(req, res, err, start, dur)
		}
		if err != nil {
			// Record metrics, when enabled
			if c.metrics != nil {
				c.metrics.Lock()
				c.metrics.failures++
				c.metrics.Unlock()
			}
			// Report the connection as unsuccessful
			c.Lock()
			c.pool.OnFailure(conn)
			c.Unlock()
			// Retry on EOF errors
			// NOTE(review): EOF-triggered retries are not gated on
			// c.disableRetry, unlike the other retry conditions — confirm
			// this asymmetry is intentional.
			if err == io.EOF {
				shouldRetry = true
			}
			// Retry on network errors, but not on timeout errors, unless configured
			if err, ok := err.(net.Error); ok {
				if (!err.Timeout() || c.enableRetryOnTimeout) && !c.disableRetry {
					shouldRetry = true
				}
			}
		} else {
			// Report the connection as successful
			c.Lock()
			c.pool.OnSuccess(conn)
			c.Unlock()
		}
		// Count the response status, when metrics are enabled.
		if res != nil && c.metrics != nil {
			c.metrics.Lock()
			c.metrics.responses[res.StatusCode]++
			c.metrics.Unlock()
		}
		// Retry on configured response statuses
		if res != nil && !c.disableRetry {
			for _, code := range c.retryOnStatus {
				if res.StatusCode == code {
					shouldRetry = true
					shouldCloseBody = true
				}
			}
		}
		// Break if retry should not be performed
		if !shouldRetry {
			break
		}
		// Drain and close body when retrying after response
		if shouldCloseBody && i < c.maxRetries {
			if res.Body != nil {
				io.Copy(ioutil.Discard, res.Body)
				res.Body.Close()
			}
		}
		// Delay the retry if a backoff function is configured
		if c.retryBackoff != nil {
			time.Sleep(c.retryBackoff(i + 1))
		}
	}
	// TODO(karmi): Wrap error
	return res, err
}
// URLs returns the list of transport URLs currently held by the connection pool.
//
func (c *Client) URLs() []*url.URL {
	return c.pool.URLs()
}
// setReqURL rewrites the request URL to target the given connection:
// it copies the scheme and host, and prefixes the request path with the
// connection's base path, when one is set.
func (c *Client) setReqURL(u *url.URL, req *http.Request) *http.Request {
	req.URL.Scheme, req.URL.Host = u.Scheme, u.Host
	if u.Path != "" {
		req.URL.Path = u.Path + req.URL.Path
	}
	return req
}
// setReqAuth applies HTTP Basic Authentication to the request, unless an
// Authorization header is already present. Credentials embedded in the
// connection URL take precedence over the client-level username/password.
func (c *Client) setReqAuth(u *url.URL, req *http.Request) *http.Request {
	if _, ok := req.Header["Authorization"]; ok {
		return req
	}
	switch {
	case u.User != nil:
		pass, _ := u.User.Password()
		req.SetBasicAuth(u.User.Username(), pass)
	case c.username != "" && c.password != "":
		req.SetBasicAuth(c.username, c.password)
	}
	return req
}
// signRequest signs the request with the configured signer; it is a no-op
// when no signer is set.
func (c *Client) signRequest(req *http.Request) error {
	if c.signer == nil {
		return nil
	}
	return c.signer.SignRequest(req)
}
// setReqUserAgent sets the User-Agent header to the package-level value
// computed once in init (see initUserAgent).
func (c *Client) setReqUserAgent(req *http.Request) *http.Request {
	req.Header.Set("User-Agent", userAgent)
	return req
}
// setReqGlobalHeader adds the client's global header fields to the request.
// Fields already present on the request take precedence and are left
// untouched.
func (c *Client) setReqGlobalHeader(req *http.Request) *http.Request {
	if len(c.header) > 0 {
		for k, v := range c.header {
			// Only add the global values when the request does not already
			// carry this key. (The previous check, req.Header.Get(k) != k,
			// compared a header VALUE with the key NAME, so it was
			// effectively always true and produced duplicate headers.)
			// Header.Values canonicalizes the key before the lookup.
			if len(req.Header.Values(k)) > 0 {
				continue
			}
			for _, vv := range v {
				req.Header.Add(k, vv)
			}
		}
	}
	return req
}
// logRoundTrip forwards a request/response pair to the configured logger.
// A shallow copy of the response is logged; when response-body logging is
// enabled, the body stream is duplicated so the caller can still consume
// it after logging.
func (c *Client) logRoundTrip(
	req *http.Request,
	res *http.Response,
	err error,
	start time.Time,
	dur time.Duration,
) {
	var dupRes http.Response
	if res != nil {
		dupRes = *res
	}
	if c.logger.ResponseBodyEnabled() && res != nil && res.Body != nil && res.Body != http.NoBody {
		b1, b2, _ := duplicateBody(res.Body)
		dupRes.Body = b1
		res.Body = b2
	}
	c.logger.LogRoundTrip(req, &dupRes, err, start, dur) // errcheck exclude
}
func initUserAgent() string {
var b strings.Builder
b.WriteString("opensearch-go")
b.WriteRune('/')
b.WriteString(Version)
b.WriteRune(' ')
b.WriteRune('(')
b.WriteString(runtime.GOOS)
b.WriteRune(' ')
b.WriteString(runtime.GOARCH)
b.WriteString("; ")
b.WriteString("Go ")
if v := reGoVersion.ReplaceAllString(runtime.Version(), "$1"); v != "" {
b.WriteString(v)
} else {
b.WriteString(runtime.Version())
}
b.WriteRune(')')
return b.String()
}