use istio client-go library instead of knative (#1661)
use istio client-go library instead of knative

bump kubernetes dependency version
change code coverage to codecov
vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go (generated, vendored, 4 changes)
@@ -35,7 +35,7 @@ type RateLimiter interface {
 }
 
 // DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue.  It has
-// both overall and per-item rate limitting.  The overall is a token bucket and the per-item is exponential
+// both overall and per-item rate limiting.  The overall is a token bucket and the per-item is exponential
 func DefaultControllerRateLimiter() RateLimiter {
 	return NewMaxOfRateLimiter(
 		NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
@@ -62,7 +62,7 @@ func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
 func (r *BucketRateLimiter) Forget(item interface{}) {
 }
 
-// ItemExponentialFailureRateLimiter does a simple baseDelay*10^<num-failures> limit
+// ItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit
 // dealing with max failures and expiration are up to the caller
 type ItemExponentialFailureRateLimiter struct {
 	failuresLock sync.Mutex
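Both comment fixes in this file align the docs with the code: the per-item limiter doubles its delay on each failure (baseDelay*2^<num-failures>, not 10^), and DefaultControllerRateLimiter takes the max of that backoff and an overall token bucket. A minimal sketch of the observable backoff, assuming only this vendored client-go package (the item key is made up):

    package main

    import (
        "fmt"

        "k8s.io/client-go/util/workqueue"
    )

    func main() {
        rl := workqueue.DefaultControllerRateLimiter()

        // Repeated failures of the same item double its delay:
        // 5ms, 10ms, 20ms, 40ms, ... capped at 1000s.
        for i := 0; i < 4; i++ {
            fmt.Println(rl.When("example-item"))
        }

        // After the item finally succeeds, drop its failure history so the
        // next requeue starts again from the 5ms base delay.
        rl.Forget("example-item")
    }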
vendor/k8s.io/client-go/util/workqueue/delaying_queue.go (generated, vendored, 14 changes)
@@ -18,6 +18,7 @@ package workqueue
 
 import (
 	"container/heap"
+	"sync"
 	"time"
 
 	"k8s.io/apimachinery/pkg/util/clock"
@@ -65,6 +66,8 @@ type delayingType struct {
 
 	// stopCh lets us signal a shutdown to the waiting loop
 	stopCh chan struct{}
+	// stopOnce guarantees we only signal shutdown a single time
+	stopOnce sync.Once
 
 	// heartbeat ensures we wait no more than maxWait before firing
 	heartbeat clock.Ticker
@@ -131,11 +134,14 @@ func (pq waitForPriorityQueue) Peek() interface{} {
 	return pq[0]
 }
 
-// ShutDown gives a way to shut off this queue
+// ShutDown stops the queue. After the queue drains, the returned shutdown bool
+// on Get() will be true. This method may be invoked more than once.
 func (q *delayingType) ShutDown() {
-	q.Interface.ShutDown()
-	close(q.stopCh)
-	q.heartbeat.Stop()
+	q.stopOnce.Do(func() {
+		q.Interface.ShutDown()
+		close(q.stopCh)
+		q.heartbeat.Stop()
+	})
 }
 
 // AddAfter adds the given item to the work queue after the given delay
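The stopOnce guard is what backs the new "may be invoked more than once" wording: closing an already-closed channel panics in Go, so the old three-line body was only safe to call once. A self-contained sketch of the same pattern (the worker type is illustrative, not part of client-go):

    package main

    import (
        "fmt"
        "sync"
    )

    type worker struct {
        stopCh   chan struct{}
        stopOnce sync.Once
    }

    // Stop is idempotent: without the sync.Once, a second call
    // would panic on close(w.stopCh).
    func (w *worker) Stop() {
        w.stopOnce.Do(func() {
            close(w.stopCh)
        })
    }

    func main() {
        w := &worker{stopCh: make(chan struct{})}
        w.Stop()
        w.Stop() // safe: the closure ran exactly once
        fmt.Println("shut down cleanly")
    }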
vendor/k8s.io/client-go/util/workqueue/doc.go (generated, vendored, 2 changes)
@@ -23,4 +23,4 @@ limitations under the License.
 // * Multiple consumers and producers. In particular, it is allowed for an
 //   item to be reenqueued while it is being processed.
 // * Shutdown notifications.
-package workqueue
+package workqueue // import "k8s.io/client-go/util/workqueue"
vendor/k8s.io/client-go/util/workqueue/metrics.go (generated, vendored, 34 changes)
@@ -57,6 +57,11 @@ type SummaryMetric interface {
 	Observe(float64)
 }
 
+// HistogramMetric counts individual observations.
+type HistogramMetric interface {
+	Observe(float64)
+}
+
 type noopMetric struct{}
 
 func (noopMetric) Inc() {}
@@ -73,9 +78,9 @@ type defaultQueueMetrics struct {
 	// total number of adds handled by a workqueue
 	adds CounterMetric
 	// how long an item stays in a workqueue
-	latency SummaryMetric
+	latency HistogramMetric
 	// how long processing an item from a workqueue takes
-	workDuration SummaryMetric
+	workDuration HistogramMetric
 
 	addTimes             map[t]time.Time
 	processingStartTimes map[t]time.Time
@@ -104,7 +109,7 @@ func (m *defaultQueueMetrics) get(item t) {
 	m.depth.Dec()
 	m.processingStartTimes[item] = m.clock.Now()
 	if startTime, exists := m.addTimes[item]; exists {
-		m.latency.Observe(m.sinceInMicroseconds(startTime))
+		m.latency.Observe(m.sinceInSeconds(startTime))
 		delete(m.addTimes, item)
 	}
 }
@@ -115,7 +120,7 @@ func (m *defaultQueueMetrics) done(item t) {
 	}
 
 	if startTime, exists := m.processingStartTimes[item]; exists {
-		m.workDuration.Observe(m.sinceInMicroseconds(startTime))
+		m.workDuration.Observe(m.sinceInSeconds(startTime))
 		delete(m.processingStartTimes, item)
 	}
 }
@@ -135,7 +140,7 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() {
 	// Convert to seconds; microseconds is unhelpfully granular for this.
 	total /= 1000000
 	m.unfinishedWorkSeconds.Set(total)
-	m.longestRunningProcessor.Set(oldest) // in microseconds.
+	m.longestRunningProcessor.Set(oldest / 1000000)
 }
 
 type noMetrics struct{}
@@ -150,6 +155,11 @@ func (m *defaultQueueMetrics) sinceInMicroseconds(start time.Time) float64 {
 	return float64(m.clock.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
 }
 
+// Gets the time since the specified start in seconds.
+func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 {
+	return m.clock.Since(start).Seconds()
+}
+
 type retryMetrics interface {
 	retry()
 }
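The unit migration in get(), done(), and updateUnfinishedWork() is mechanical: a Seconds() call (or a division by 1e6) replaces the nanoseconds-to-microseconds arithmetic, since seconds are the conventional base unit for duration metrics. A quick check of the two conversions with a made-up duration:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        elapsed := 1500 * time.Millisecond

        // Old metric value: microseconds.
        micros := float64(elapsed.Nanoseconds() / time.Microsecond.Nanoseconds())

        // New metric value: seconds.
        secs := elapsed.Seconds()

        fmt.Println(micros, secs) // 1.5e+06 1.5
    }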
@@ -170,10 +180,10 @@ func (m *defaultRetryMetrics) retry() {
 type MetricsProvider interface {
 	NewDepthMetric(name string) GaugeMetric
 	NewAddsMetric(name string) CounterMetric
-	NewLatencyMetric(name string) SummaryMetric
-	NewWorkDurationMetric(name string) SummaryMetric
+	NewLatencyMetric(name string) HistogramMetric
+	NewWorkDurationMetric(name string) HistogramMetric
 	NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
-	NewLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric
+	NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric
 	NewRetriesMetric(name string) CounterMetric
 }
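Any consumer that registered a custom MetricsProvider has to follow these renames: the latency and work-duration constructors now return HistogramMetric, and the longest-running-processor metric is constructed by NewLongestRunningProcessorSecondsMetric. A minimal in-memory provider satisfying the updated interface, as a sketch (all the stub metric types are invented for illustration):

    package main

    import "k8s.io/client-go/util/workqueue"

    // histogram records raw observations; Observe(float64) is all the
    // new HistogramMetric interface requires.
    type histogram struct{ samples []float64 }

    func (h *histogram) Observe(v float64) { h.samples = append(h.samples, v) }

    type counter struct{ n float64 }

    func (c *counter) Inc() { c.n++ }

    type gauge struct{ v float64 }

    func (g *gauge) Inc()          { g.v++ }
    func (g *gauge) Dec()          { g.v-- }
    func (g *gauge) Set(v float64) { g.v = v }

    type provider struct{}

    func (provider) NewDepthMetric(name string) workqueue.GaugeMetric   { return &gauge{} }
    func (provider) NewAddsMetric(name string) workqueue.CounterMetric { return &counter{} }

    // The two HistogramMetric constructors replace the old SummaryMetric ones.
    func (provider) NewLatencyMetric(name string) workqueue.HistogramMetric      { return &histogram{} }
    func (provider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { return &histogram{} }

    func (provider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
        return &gauge{}
    }

    // Renamed from NewLongestRunningProcessorMicrosecondsMetric; values are seconds now.
    func (provider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
        return &gauge{}
    }

    func (provider) NewRetriesMetric(name string) workqueue.CounterMetric { return &counter{} }

    func main() {
        // The provider must be registered before the first queue is created.
        workqueue.SetProvider(provider{})
        q := workqueue.NewNamed("example")
        q.Add("item")
        q.ShutDown()
    }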
@@ -187,11 +197,11 @@ func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric {
 	return noopMetric{}
 }
 
-func (_ noopMetricsProvider) NewLatencyMetric(name string) SummaryMetric {
+func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric {
 	return noopMetric{}
 }
 
-func (_ noopMetricsProvider) NewWorkDurationMetric(name string) SummaryMetric {
+func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric {
 	return noopMetric{}
 }
@@ -199,7 +209,7 @@ func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) Settabl
 	return noopMetric{}
 }
 
-func (_ noopMetricsProvider) NewLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric {
+func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric {
 	return noopMetric{}
 }
@@ -235,7 +245,7 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu
 		latency:                 mp.NewLatencyMetric(name),
 		workDuration:            mp.NewWorkDurationMetric(name),
 		unfinishedWorkSeconds:   mp.NewUnfinishedWorkSecondsMetric(name),
-		longestRunningProcessor: mp.NewLongestRunningProcessorMicrosecondsMetric(name),
+		longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
 		addTimes:                map[t]time.Time{},
 		processingStartTimes:    map[t]time.Time{},
 	}
vendor/k8s.io/client-go/util/workqueue/parallelizer.go (generated, vendored, 8 changes)
@@ -25,14 +25,6 @@ import (
 
 type DoWorkPieceFunc func(piece int)
 
-// Parallelize is a very simple framework that allows for parallelizing
-// N independent pieces of work.
-//
-// Deprecated: Use ParallelizeUntil instead.
-func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) {
-	ParallelizeUntil(nil, workers, pieces, doWorkPiece)
-}
-
 // ParallelizeUntil is a framework that allows for parallelizing N
 // independent pieces of work until done or the context is canceled.
 func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) {
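With the deprecated Parallelize shim removed, callers now pass a context themselves (the shim had passed nil). A small usage sketch against this vendored version; the item slice is made up:

    package main

    import (
        "context"
        "fmt"
        "time"

        "k8s.io/client-go/util/workqueue"
    )

    func main() {
        items := []string{"a", "b", "c", "d", "e"}

        // Cancelation only stops new pieces from being scheduled;
        // pieces already running finish normally.
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        // Three workers share len(items) independent pieces; the piece
        // index is the only coordination between them.
        workqueue.ParallelizeUntil(ctx, 3, len(items), func(piece int) {
            fmt.Println("processing", items[piece])
        })
    }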