vendor/go.etcd.io/etcd/client/v3/maintenance.go (generated, vendored): 1 line changed
@@ -92,6 +92,7 @@ func NewMaintenance(c *Client) Maintenance {
 		err = c.getToken(dctx)
 		cancel()
 		if err != nil {
+			conn.Close()
 			return nil, nil, fmt.Errorf("failed to getToken from endpoint %s with maintenance client: %v", endpoint, err)
 		}
 		cancel = func() { conn.Close() }
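If getToken fails while dialing the maintenance client, the freshly dialed connection must be closed before returning the error, or each failed attempt leaks a connection. Below is a minimal, hypothetical sketch of that cleanup pattern; dialAndAuth and its parameters are illustrative stand-ins, not part of the etcd client API.

// Hypothetical sketch (not the etcd API): a dial helper that performs a
// follow-up call after connecting must close the connection on that call's
// error path, otherwise every failed attempt leaks a connection.
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// dial and auth stand in for the real dial and token-fetch steps.
func dialAndAuth(dial func() (io.ReadCloser, error), auth func() error) (io.ReadCloser, error) {
	conn, err := dial()
	if err != nil {
		return nil, err
	}
	if err := auth(); err != nil {
		conn.Close() // release the freshly dialed connection before returning the error
		return nil, err
	}
	return conn, nil
}

func main() {
	dial := func() (io.ReadCloser, error) { return io.NopCloser(strings.NewReader("")), nil }
	auth := func() error { return errors.New("failed to getToken") }
	if _, err := dialAndAuth(dial, auth); err != nil {
		fmt.Println("auth failed, connection closed:", err)
	}
}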
vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go (generated, vendored): 31 lines changed
@@ -74,13 +74,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien
 				continue
 			}
 			if c.shouldRefreshToken(lastErr, callOpts) {
-				// clear auth token before refreshing it.
-				// call c.Auth.Authenticate with an invalid token will always fail the auth check on the server-side,
-				// if the server has not apply the patch of pr #12165 (https://github.com/etcd-io/etcd/pull/12165)
-				// and a rpctypes.ErrInvalidAuthToken will recursively call c.getToken until system run out of resource.
-				c.authTokenBundle.UpdateAuthToken("")
-
-				gterr := c.getToken(ctx)
+				gterr := c.refreshToken(ctx)
 				if gterr != nil {
 					c.GetLogger().Warn(
 						"retrying of unary invoker failed to fetch new auth token",
@@ -161,6 +155,24 @@ func (c *Client) shouldRefreshToken(err error, callOpts *options) bool {
 		(rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken || rpctypes.Error(err) == rpctypes.ErrAuthOldRevision)
 }

+func (c *Client) refreshToken(ctx context.Context) error {
+	if c.authTokenBundle == nil {
+		// c.authTokenBundle will be initialized only when
+		// c.Username != "" && c.Password != "".
+		//
+		// When users use the TLS CommonName based authentication, the
+		// authTokenBundle is always nil. But it's possible for the clients
+		// to get `rpctypes.ErrAuthOldRevision` response when the clients
+		// concurrently modify auth data (e.g, addUser, deleteUser etc.).
+		// In this case, there is no need to refresh the token; instead the
+		// clients just need to retry the operations (e.g. Put, Delete etc).
+		return nil
+	}
+	// clear auth token before refreshing it.
+	c.authTokenBundle.UpdateAuthToken("")
+	return c.getToken(ctx)
+}
+
 // type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
 // proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
 // a new ClientStream according to the retry policy.
@@ -259,10 +271,7 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}
 		return true, err
 	}
 	if s.client.shouldRefreshToken(err, s.callOpts) {
-		// clear auth token to avoid failure when call getToken
-		s.client.authTokenBundle.UpdateAuthToken("")
-
-		gterr := s.client.getToken(s.ctx)
+		gterr := s.client.refreshToken(s.ctx)
 		if gterr != nil {
 			s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
 			return false, err // return the original error for simplicity
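Both retry paths (unary and server-streaming) now delegate to the new refreshToken helper instead of clearing the token bundle and calling getToken inline, which is unsafe when authTokenBundle is nil (TLS CommonName based authentication). Below is a standalone sketch of that guard using illustrative types; client and tokenBundle here are not the etcd types.

// Standalone sketch of the refreshToken guard: with no token bundle there is
// nothing to refresh and the caller simply retries the operation; otherwise
// the cached token is cleared and fetched again.
package main

import (
	"context"
	"errors"
	"fmt"
)

type tokenBundle struct{ token string }

type client struct {
	bundle   *tokenBundle
	getToken func(ctx context.Context) error
}

func (c *client) refreshToken(ctx context.Context) error {
	if c.bundle == nil {
		// e.g. TLS CommonName auth: no cached token, just let the caller retry.
		return nil
	}
	c.bundle.token = "" // clear the stale token before refreshing it
	return c.getToken(ctx)
}

func main() {
	c := &client{}
	fmt.Println(c.refreshToken(context.Background())) // <nil>: nothing to refresh

	c.bundle = &tokenBundle{token: "stale"}
	c.getToken = func(context.Context) error { return errors.New("authenticate failed") }
	fmt.Println(c.refreshToken(context.Background())) // surfaces the fetch error
}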
vendor/go.etcd.io/etcd/client/v3/watch.go (generated, vendored): 50 lines changed
@@ -37,6 +37,13 @@ const (
 	EventTypePut    = mvccpb.PUT

 	closeSendErrTimeout = 250 * time.Millisecond
+
+	// AutoWatchID is the watcher ID passed in WatchStream.Watch when no
+	// user-provided ID is available. If pass, an ID will automatically be assigned.
+	AutoWatchID = 0
+
+	// InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch.
+	InvalidWatchID = -1
 )

 type Event mvccpb.Event
@@ -450,7 +457,7 @@ func (w *watcher) closeStream(wgs *watchGrpcStream) {

 func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
 	// check watch ID for backward compatibility (<= v3.3)
-	if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
+	if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") {
 		w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
 		// failed; no channel
 		close(ws.recvc)
@@ -481,7 +488,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
 	} else if ws.outc != nil {
 		close(ws.outc)
 	}
-	if ws.id != -1 {
+	if ws.id != InvalidWatchID {
 		delete(w.substreams, ws.id)
 		return
 	}
@@ -533,6 +540,7 @@ func (w *watchGrpcStream) run() {
 	cancelSet := make(map[int64]struct{})

 	var cur *pb.WatchResponse
+	backoff := time.Millisecond
 	for {
 		select {
 		// Watch() requested
@@ -543,7 +551,7 @@ func (w *watchGrpcStream) run() {
 			// TODO: pass custom watch ID?
 			ws := &watcherStream{
 				initReq: *wreq,
-				id:      -1,
+				id:      InvalidWatchID,
 				outc:    outc,
 				// unbuffered so resumes won't cause repeat events
 				recvc: make(chan *WatchResponse),
@@ -649,6 +657,7 @@ func (w *watchGrpcStream) run() {
 				closeErr = err
 				return
 			}
+			backoff = w.backoffIfUnavailable(backoff, err)
 			if wc, closeErr = w.newWatchClient(); closeErr != nil {
 				return
 			}
@@ -669,7 +678,7 @@ func (w *watchGrpcStream) run() {
 			if len(w.substreams)+len(w.resuming) == 0 {
 				return
 			}
-			if ws.id != -1 {
+			if ws.id != InvalidWatchID {
 				// client is closing an established watch; close it on the server proactively instead of waiting
 				// to close when the next message arrives
 				cancelSet[ws.id] = struct{}{}
@@ -716,9 +725,9 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
 		cancelReason: pbresp.CancelReason,
 	}

-	// watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to
+	// watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of InvalidWatchID to
 	// indicate they should be broadcast.
-	if wr.IsProgressNotify() && pbresp.WatchId == -1 {
+	if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID {
 		return w.broadcastResponse(wr)
 	}

@@ -873,7 +882,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
 	w.resumec = make(chan struct{})
 	w.joinSubstreams()
 	for _, ws := range w.substreams {
-		ws.id = -1
+		ws.id = InvalidWatchID
 		w.resuming = append(w.resuming, ws)
 	}
 	// strip out nils, if any
@@ -963,6 +972,21 @@ func (w *watchGrpcStream) joinSubstreams() {

 var maxBackoff = 100 * time.Millisecond

+func (w *watchGrpcStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration {
+	if isUnavailableErr(w.ctx, err) {
+		// retry, but backoff
+		if backoff < maxBackoff {
+			// 25% backoff factor
+			backoff = backoff + backoff/4
+			if backoff > maxBackoff {
+				backoff = maxBackoff
+			}
+		}
+		time.Sleep(backoff)
+	}
+	return backoff
+}
+
 // openWatchClient retries opening a watch client until success or halt.
 // manually retry in case "ws==nil && err==nil"
 // TODO: remove FailFast=false
@@ -983,17 +1007,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
 		if isHaltErr(w.ctx, err) {
 			return nil, v3rpc.Error(err)
 		}
-		if isUnavailableErr(w.ctx, err) {
-			// retry, but backoff
-			if backoff < maxBackoff {
-				// 25% backoff factor
-				backoff = backoff + backoff/4
-				if backoff > maxBackoff {
-					backoff = maxBackoff
-				}
-			}
-			time.Sleep(backoff)
-		}
+		backoff = w.backoffIfUnavailable(backoff, err)
 	}
 	return ws, nil
 }
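In summary, the watch.go changes replace the literal -1 watch IDs with the new InvalidWatchID constant (AutoWatchID = 0 is added alongside it) and factor the reconnect backoff, previously inlined only in openWatchClient, into backoffIfUnavailable so run() can also back off before recreating the watch client. The delay grows by 25% per Unavailable error and is capped at maxBackoff (100ms). A standalone sketch of that growth follows; it mirrors the helper above and is runnable on its own.

// Prints the successive sleep durations the watch stream would use while the
// server keeps returning Unavailable: start at 1ms, grow 25% per attempt,
// cap at 100ms.
package main

import (
	"fmt"
	"time"
)

func main() {
	const maxBackoff = 100 * time.Millisecond
	backoff := time.Millisecond
	for attempt := 1; attempt <= 25; attempt++ {
		if backoff < maxBackoff {
			backoff = backoff + backoff/4 // 25% backoff factor
			if backoff > maxBackoff {
				backoff = maxBackoff
			}
		}
		fmt.Printf("attempt %2d: sleep %v\n", attempt, backoff)
	}
}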