Upgrade k8s package verison (#5358)
* upgrade k8s package version Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io> * Script upgrade and code formatting. Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io> Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>
This commit is contained in:
67
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/interface.go
generated
vendored
Normal file
67
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/interface.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
// Gauge is the methods of a gauge that are used by instrumented code.
|
||||
type Gauge interface {
|
||||
Set(float64)
|
||||
Inc()
|
||||
Dec()
|
||||
Add(float64)
|
||||
SetToCurrentTime()
|
||||
}
|
||||
|
||||
// RatioedGauge tracks ratios.
|
||||
// The numerator is set/changed through the Gauge methods,
|
||||
// and the denominator can be updated through the SetDenominator method.
|
||||
// A ratio is tracked whenever the numerator or denominator is set/changed.
|
||||
type RatioedGauge interface {
|
||||
Gauge
|
||||
|
||||
// SetDenominator sets the denominator to use until it is changed again
|
||||
SetDenominator(float64)
|
||||
}
|
||||
|
||||
// RatioedGaugeVec creates related observers that are
|
||||
// differentiated by a series of label values
|
||||
type RatioedGaugeVec interface {
|
||||
// NewForLabelValuesSafe makes a new vector member for the given tuple of label values,
|
||||
// initialized with the given numerator and denominator.
|
||||
// Unlike the usual Vec WithLabelValues method, this is intended to be called only
|
||||
// once per vector member (at the start of its lifecycle).
|
||||
// The "Safe" part is saying that the returned object will function properly after metric registration
|
||||
// even if this method is called before registration.
|
||||
NewForLabelValuesSafe(initialNumerator, initialDenominator float64, labelValues []string) RatioedGauge
|
||||
}
|
||||
|
||||
//////////////////////////////// Pairs ////////////////////////////////
|
||||
//
|
||||
// API Priority and Fairness tends to use RatioedGaugeVec members in pairs,
|
||||
// one for requests waiting in a queue and one for requests being executed.
|
||||
// The following definitions are a convenience layer that adds support for that
|
||||
// particular pattern of usage.
|
||||
|
||||
// RatioedGaugePair is a corresponding pair of gauges, one for the
|
||||
// number of requests waiting in queue(s) and one for the number of
|
||||
// requests being executed.
|
||||
type RatioedGaugePair struct {
|
||||
// RequestsWaiting is given observations of the number of currently queued requests
|
||||
RequestsWaiting RatioedGauge
|
||||
|
||||
// RequestsExecuting is given observations of the number of requests currently executing
|
||||
RequestsExecuting RatioedGauge
|
||||
}
|
||||
291
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go
generated
vendored
291
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go
generated
vendored
@@ -18,11 +18,13 @@ package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
epmetrics "k8s.io/apiserver/pkg/endpoints/metrics"
|
||||
apirequest "k8s.io/apiserver/pkg/endpoints/request"
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
"k8s.io/component-base/metrics/legacyregistry"
|
||||
basemetricstestutil "k8s.io/component-base/metrics/testutil"
|
||||
@@ -34,11 +36,13 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
requestKind = "request_kind"
|
||||
priorityLevel = "priority_level"
|
||||
flowSchema = "flow_schema"
|
||||
phase = "phase"
|
||||
mark = "mark"
|
||||
requestKind = "request_kind"
|
||||
priorityLevel = "priority_level"
|
||||
flowSchema = "flow_schema"
|
||||
phase = "phase"
|
||||
LabelNamePhase = "phase"
|
||||
LabelValueWaiting = "waiting"
|
||||
LabelValueExecuting = "executing"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -88,7 +92,7 @@ var (
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "rejected_requests_total",
|
||||
Help: "Number of requests rejected by API Priority and Fairness system",
|
||||
Help: "Number of requests rejected by API Priority and Fairness subsystem",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, flowSchema, "reason"},
|
||||
@@ -98,58 +102,109 @@ var (
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "dispatched_requests_total",
|
||||
Help: "Number of requests released by API Priority and Fairness system for service",
|
||||
Help: "Number of requests executed by API Priority and Fairness subsystem",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, flowSchema},
|
||||
)
|
||||
|
||||
// PriorityLevelConcurrencyObserverPairGenerator creates pairs that observe concurrency for priority levels
|
||||
PriorityLevelConcurrencyObserverPairGenerator = NewSampleAndWaterMarkHistogramsPairGenerator(clock.RealClock{}, time.Millisecond,
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "priority_level_request_count_samples",
|
||||
Help: "Periodic observations of the number of requests",
|
||||
Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1},
|
||||
// PriorityLevelExecutionSeatsGaugeVec creates observers of seats occupied throughout execution for priority levels
|
||||
PriorityLevelExecutionSeatsGaugeVec = NewTimingRatioHistogramVec(
|
||||
&compbasemetrics.TimingHistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "priority_level_seat_utilization",
|
||||
Help: "Observations, at the end of every nanosecond, of utilization of seats for any stage of execution (but only initial stage for WATCHes)",
|
||||
// Buckets for both 0.99 and 1.0 mean PromQL's histogram_quantile will reveal saturation
|
||||
Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 1},
|
||||
ConstLabels: map[string]string{phase: "executing"},
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "priority_level_request_count_watermarks",
|
||||
Help: "Watermarks of the number of requests",
|
||||
Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1},
|
||||
priorityLevel,
|
||||
)
|
||||
// PriorityLevelConcurrencyGaugeVec creates gauges of concurrency broken down by phase, priority level
|
||||
PriorityLevelConcurrencyGaugeVec = NewTimingRatioHistogramVec(
|
||||
&compbasemetrics.TimingHistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "priority_level_request_utilization",
|
||||
Help: "Observations, at the end of every nanosecond, of number of requests (as a fraction of the relevant limit) waiting or in any stage of execution (but only initial stage for WATCHes)",
|
||||
// For executing: the denominator will be seats, so this metric will skew low.
|
||||
// For waiting: total queue capacity is generally quite generous, so this metric will skew low.
|
||||
Buckets: []float64{0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.25, 0.5, 0.75, 1},
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel})
|
||||
|
||||
// ReadWriteConcurrencyObserverPairGenerator creates pairs that observe concurrency broken down by mutating vs readonly
|
||||
ReadWriteConcurrencyObserverPairGenerator = NewSampleAndWaterMarkHistogramsPairGenerator(clock.RealClock{}, time.Millisecond,
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "read_vs_write_request_count_samples",
|
||||
Help: "Periodic observations of the number of requests",
|
||||
Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1},
|
||||
LabelNamePhase, priorityLevel,
|
||||
)
|
||||
// readWriteConcurrencyGaugeVec creates ratioed gauges of requests/limit broken down by phase and mutating vs readonly
|
||||
readWriteConcurrencyGaugeVec = NewTimingRatioHistogramVec(
|
||||
&compbasemetrics.TimingHistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "read_vs_write_current_requests",
|
||||
Help: "Observations, at the end of every nanosecond, of the number of requests (as a fraction of the relevant limit) waiting or in regular stage of execution",
|
||||
// This metric will skew low for the same reason as the priority level metrics
|
||||
// and also because APF has a combined limit for mutating and readonly.
|
||||
Buckets: []float64{0, 0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 1},
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
&compbasemetrics.HistogramOpts{
|
||||
LabelNamePhase, requestKind,
|
||||
)
|
||||
apiserverCurrentR = compbasemetrics.NewGaugeVec(
|
||||
&compbasemetrics.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "read_vs_write_request_count_watermarks",
|
||||
Help: "Watermarks of the number of requests",
|
||||
Buckets: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1},
|
||||
Name: "current_r",
|
||||
Help: "R(time of last change)",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{requestKind})
|
||||
|
||||
[]string{priorityLevel},
|
||||
)
|
||||
apiserverDispatchR = compbasemetrics.NewGaugeVec(
|
||||
&compbasemetrics.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "dispatch_r",
|
||||
Help: "R(time of last dispatch)",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel},
|
||||
)
|
||||
apiserverLatestS = compbasemetrics.NewGaugeVec(
|
||||
&compbasemetrics.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "latest_s",
|
||||
Help: "S(most recently dispatched request)",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel},
|
||||
)
|
||||
apiserverNextSBounds = compbasemetrics.NewGaugeVec(
|
||||
&compbasemetrics.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "next_s_bounds",
|
||||
Help: "min and max, over queues, of S(oldest waiting request in queue)",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, "bound"},
|
||||
)
|
||||
apiserverNextDiscountedSBounds = compbasemetrics.NewGaugeVec(
|
||||
&compbasemetrics.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "next_discounted_s_bounds",
|
||||
Help: "min and max, over queues, of S(oldest waiting request in queue) - estimated work in progress",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, "bound"},
|
||||
)
|
||||
apiserverCurrentInqueueRequests = compbasemetrics.NewGaugeVec(
|
||||
&compbasemetrics.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "current_inqueue_requests",
|
||||
Help: "Number of requests currently pending in queues of the API Priority and Fairness system",
|
||||
Help: "Number of requests currently pending in queues of the API Priority and Fairness subsystem",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, flowSchema},
|
||||
@@ -159,7 +214,7 @@ var (
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "request_queue_length_after_enqueue",
|
||||
Help: "Length of queue in the API Priority and Fairness system, as seen by each request after it is enqueued",
|
||||
Help: "Length of queue in the API Priority and Fairness subsystem, as seen by each request after it is enqueued",
|
||||
Buckets: queueLengthBuckets,
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
@@ -170,7 +225,7 @@ var (
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "request_concurrency_limit",
|
||||
Help: "Shared concurrency limit in the API Priority and Fairness system",
|
||||
Help: "Shared concurrency limit in the API Priority and Fairness subsystem",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel},
|
||||
@@ -180,7 +235,17 @@ var (
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "current_executing_requests",
|
||||
Help: "Number of requests currently executing in the API Priority and Fairness system",
|
||||
Help: "Number of requests in initial (for a WATCH) or any (for a non-WATCH) execution stage in the API Priority and Fairness subsystem",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, flowSchema},
|
||||
)
|
||||
apiserverRequestConcurrencyInUse = compbasemetrics.NewGaugeVec(
|
||||
&compbasemetrics.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "request_concurrency_in_use",
|
||||
Help: "Concurrency (number of seats) occupied by the currently executing (initial stage for a WATCH, any stage otherwise) requests in the API Priority and Fairness subsystem",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, flowSchema},
|
||||
@@ -201,26 +266,112 @@ var (
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "request_execution_seconds",
|
||||
Help: "Duration of request execution in the API Priority and Fairness system",
|
||||
Help: "Duration of initial stage (for a WATCH) or any (for a non-WATCH) stage of request execution in the API Priority and Fairness subsystem",
|
||||
Buckets: requestDurationSecondsBuckets,
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, flowSchema, "type"},
|
||||
)
|
||||
watchCountSamples = compbasemetrics.NewHistogramVec(
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "watch_count_samples",
|
||||
Help: "count of watchers for mutating requests in API Priority and Fairness",
|
||||
Buckets: []float64{0, 1, 10, 100, 1000, 10000},
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, flowSchema},
|
||||
)
|
||||
apiserverEpochAdvances = compbasemetrics.NewCounterVec(
|
||||
&compbasemetrics.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "epoch_advance_total",
|
||||
Help: "Number of times the queueset's progress meter jumped backward",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, "success"},
|
||||
)
|
||||
apiserverWorkEstimatedSeats = compbasemetrics.NewHistogramVec(
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "work_estimated_seats",
|
||||
Help: "Number of estimated seats (maximum of initial and final seats) associated with requests in API Priority and Fairness",
|
||||
// the upper bound comes from the maximum number of seats a request
|
||||
// can occupy which is currently set at 10.
|
||||
Buckets: []float64{1, 2, 4, 10},
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, flowSchema},
|
||||
)
|
||||
apiserverDispatchWithNoAccommodation = compbasemetrics.NewCounterVec(
|
||||
&compbasemetrics.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "request_dispatch_no_accommodation_total",
|
||||
Help: "Number of times a dispatch attempt resulted in a non accommodation due to lack of available seats",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{priorityLevel, flowSchema},
|
||||
)
|
||||
|
||||
metrics = Registerables{
|
||||
apiserverRejectedRequestsTotal,
|
||||
apiserverDispatchedRequestsTotal,
|
||||
apiserverCurrentR,
|
||||
apiserverDispatchR,
|
||||
apiserverLatestS,
|
||||
apiserverNextSBounds,
|
||||
apiserverNextDiscountedSBounds,
|
||||
apiserverCurrentInqueueRequests,
|
||||
apiserverRequestQueueLength,
|
||||
apiserverRequestConcurrencyLimit,
|
||||
apiserverRequestConcurrencyInUse,
|
||||
apiserverCurrentExecutingRequests,
|
||||
apiserverRequestWaitingSeconds,
|
||||
apiserverRequestExecutionSeconds,
|
||||
watchCountSamples,
|
||||
apiserverEpochAdvances,
|
||||
apiserverWorkEstimatedSeats,
|
||||
apiserverDispatchWithNoAccommodation,
|
||||
}.
|
||||
Append(PriorityLevelConcurrencyObserverPairGenerator.metrics()...).
|
||||
Append(ReadWriteConcurrencyObserverPairGenerator.metrics()...)
|
||||
Append(PriorityLevelExecutionSeatsGaugeVec.metrics()...).
|
||||
Append(PriorityLevelConcurrencyGaugeVec.metrics()...).
|
||||
Append(readWriteConcurrencyGaugeVec.metrics()...)
|
||||
)
|
||||
|
||||
type indexOnce struct {
|
||||
labelValues []string
|
||||
once sync.Once
|
||||
gauge RatioedGauge
|
||||
}
|
||||
|
||||
func (io *indexOnce) getGauge() RatioedGauge {
|
||||
io.once.Do(func() {
|
||||
io.gauge = readWriteConcurrencyGaugeVec.NewForLabelValuesSafe(0, 1, io.labelValues)
|
||||
})
|
||||
return io.gauge
|
||||
}
|
||||
|
||||
var waitingReadonly = indexOnce{labelValues: []string{LabelValueWaiting, epmetrics.ReadOnlyKind}}
|
||||
var executingReadonly = indexOnce{labelValues: []string{LabelValueExecuting, epmetrics.ReadOnlyKind}}
|
||||
var waitingMutating = indexOnce{labelValues: []string{LabelValueWaiting, epmetrics.MutatingKind}}
|
||||
var executingMutating = indexOnce{labelValues: []string{LabelValueExecuting, epmetrics.MutatingKind}}
|
||||
|
||||
// GetWaitingReadonlyConcurrency returns the gauge of number of readonly requests waiting / limit on those.
|
||||
var GetWaitingReadonlyConcurrency = waitingReadonly.getGauge
|
||||
|
||||
// GetExecutingReadonlyConcurrency returns the gauge of number of executing readonly requests / limit on those.
|
||||
var GetExecutingReadonlyConcurrency = executingReadonly.getGauge
|
||||
|
||||
// GetWaitingMutatingConcurrency returns the gauge of number of mutating requests waiting / limit on those.
|
||||
var GetWaitingMutatingConcurrency = waitingMutating.getGauge
|
||||
|
||||
// GetExecutingMutatingConcurrency returns the gauge of number of executing mutating requests / limit on those.
|
||||
var GetExecutingMutatingConcurrency = executingMutating.getGauge
|
||||
|
||||
// AddRequestsInQueues adds the given delta to the gauge of the # of requests in the queues of the specified flowSchema and priorityLevel
|
||||
func AddRequestsInQueues(ctx context.Context, priorityLevel, flowSchema string, delta int) {
|
||||
apiserverCurrentInqueueRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta))
|
||||
@@ -231,6 +382,27 @@ func AddRequestsExecuting(ctx context.Context, priorityLevel, flowSchema string,
|
||||
apiserverCurrentExecutingRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta))
|
||||
}
|
||||
|
||||
// SetCurrentR sets the current-R (virtualTime) gauge for the given priority level
|
||||
func SetCurrentR(priorityLevel string, r float64) {
|
||||
apiserverCurrentR.WithLabelValues(priorityLevel).Set(r)
|
||||
}
|
||||
|
||||
// SetLatestS sets the latest-S (virtual time of dispatched request) gauge for the given priority level
|
||||
func SetDispatchMetrics(priorityLevel string, r, s, sMin, sMax, discountedSMin, discountedSMax float64) {
|
||||
apiserverDispatchR.WithLabelValues(priorityLevel).Set(r)
|
||||
apiserverLatestS.WithLabelValues(priorityLevel).Set(s)
|
||||
apiserverNextSBounds.WithLabelValues(priorityLevel, "min").Set(sMin)
|
||||
apiserverNextSBounds.WithLabelValues(priorityLevel, "max").Set(sMax)
|
||||
apiserverNextDiscountedSBounds.WithLabelValues(priorityLevel, "min").Set(discountedSMin)
|
||||
apiserverNextDiscountedSBounds.WithLabelValues(priorityLevel, "max").Set(discountedSMax)
|
||||
}
|
||||
|
||||
// AddRequestConcurrencyInUse adds the given delta to the gauge of concurrency in use by
|
||||
// the currently executing requests of the given flowSchema and priorityLevel
|
||||
func AddRequestConcurrencyInUse(priorityLevel, flowSchema string, delta int) {
|
||||
apiserverRequestConcurrencyInUse.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta))
|
||||
}
|
||||
|
||||
// UpdateSharedConcurrencyLimit updates the value for the concurrency limit in flow control
|
||||
func UpdateSharedConcurrencyLimit(priorityLevel string, limit int) {
|
||||
apiserverRequestConcurrencyLimit.WithLabelValues(priorityLevel).Set(float64(limit))
|
||||
@@ -258,5 +430,30 @@ func ObserveWaitingDuration(ctx context.Context, priorityLevel, flowSchema, exec
|
||||
|
||||
// ObserveExecutionDuration observes the execution duration for flow control
|
||||
func ObserveExecutionDuration(ctx context.Context, priorityLevel, flowSchema string, executionTime time.Duration) {
|
||||
apiserverRequestExecutionSeconds.WithContext(ctx).WithLabelValues(priorityLevel, flowSchema).Observe(executionTime.Seconds())
|
||||
reqType := "regular"
|
||||
if requestInfo, ok := apirequest.RequestInfoFrom(ctx); ok && requestInfo.Verb == "watch" {
|
||||
reqType = requestInfo.Verb
|
||||
}
|
||||
apiserverRequestExecutionSeconds.WithContext(ctx).WithLabelValues(priorityLevel, flowSchema, reqType).Observe(executionTime.Seconds())
|
||||
}
|
||||
|
||||
// ObserveWatchCount notes a sampling of a watch count
|
||||
func ObserveWatchCount(ctx context.Context, priorityLevel, flowSchema string, count int) {
|
||||
watchCountSamples.WithLabelValues(priorityLevel, flowSchema).Observe(float64(count))
|
||||
}
|
||||
|
||||
// AddEpochAdvance notes an advance of the progress meter baseline for a given priority level
|
||||
func AddEpochAdvance(ctx context.Context, priorityLevel string, success bool) {
|
||||
apiserverEpochAdvances.WithContext(ctx).WithLabelValues(priorityLevel, strconv.FormatBool(success)).Inc()
|
||||
}
|
||||
|
||||
// ObserveWorkEstimatedSeats notes a sampling of estimated seats associated with a request
|
||||
func ObserveWorkEstimatedSeats(priorityLevel, flowSchema string, seats int) {
|
||||
apiserverWorkEstimatedSeats.WithLabelValues(priorityLevel, flowSchema).Observe(float64(seats))
|
||||
}
|
||||
|
||||
// AddDispatchWithNoAccommodation keeps track of number of times dispatch attempt results
|
||||
// in a non accommodation due to lack of available seats.
|
||||
func AddDispatchWithNoAccommodation(priorityLevel, flowSchema string) {
|
||||
apiserverDispatchWithNoAccommodation.WithLabelValues(priorityLevel, flowSchema).Inc()
|
||||
}
|
||||
|
||||
209
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go
generated
vendored
209
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/sample_and_watermark.go
generated
vendored
@@ -1,209 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
labelNameMark = "mark"
|
||||
labelValueLo = "low"
|
||||
labelValueHi = "high"
|
||||
labelNamePhase = "phase"
|
||||
labelValueWaiting = "waiting"
|
||||
labelValueExecuting = "executing"
|
||||
)
|
||||
|
||||
// SampleAndWaterMarkPairGenerator makes pairs of TimedObservers that
|
||||
// track samples and watermarks.
|
||||
type SampleAndWaterMarkPairGenerator struct {
|
||||
urGenerator SampleAndWaterMarkObserverGenerator
|
||||
}
|
||||
|
||||
var _ TimedObserverPairGenerator = SampleAndWaterMarkPairGenerator{}
|
||||
|
||||
// NewSampleAndWaterMarkHistogramsPairGenerator makes a new pair generator
|
||||
func NewSampleAndWaterMarkHistogramsPairGenerator(clock clock.PassiveClock, samplePeriod time.Duration, sampleOpts, waterMarkOpts *compbasemetrics.HistogramOpts, labelNames []string) SampleAndWaterMarkPairGenerator {
|
||||
return SampleAndWaterMarkPairGenerator{
|
||||
urGenerator: NewSampleAndWaterMarkHistogramsGenerator(clock, samplePeriod, sampleOpts, waterMarkOpts, append([]string{labelNamePhase}, labelNames...)),
|
||||
}
|
||||
}
|
||||
|
||||
// Generate makes a new pair
|
||||
func (spg SampleAndWaterMarkPairGenerator) Generate(waiting1, executing1 float64, labelValues []string) TimedObserverPair {
|
||||
return TimedObserverPair{
|
||||
RequestsWaiting: spg.urGenerator.Generate(0, waiting1, append([]string{labelValueWaiting}, labelValues...)),
|
||||
RequestsExecuting: spg.urGenerator.Generate(0, executing1, append([]string{labelValueExecuting}, labelValues...)),
|
||||
}
|
||||
}
|
||||
|
||||
func (spg SampleAndWaterMarkPairGenerator) metrics() Registerables {
|
||||
return spg.urGenerator.metrics()
|
||||
}
|
||||
|
||||
// SampleAndWaterMarkObserverGenerator creates TimedObservers that
|
||||
// populate histograms of samples and low- and high-water-marks. The
|
||||
// generator has a samplePeriod, and the histograms get an observation
|
||||
// every samplePeriod. The sampling windows are quantized based on
|
||||
// the monotonic rather than wall-clock times. The `t0` field is
|
||||
// there so to provide a baseline for monotonic clock differences.
|
||||
type SampleAndWaterMarkObserverGenerator struct {
|
||||
*sampleAndWaterMarkObserverGenerator
|
||||
}
|
||||
|
||||
type sampleAndWaterMarkObserverGenerator struct {
|
||||
clock clock.PassiveClock
|
||||
t0 time.Time
|
||||
samplePeriod time.Duration
|
||||
samples *compbasemetrics.HistogramVec
|
||||
waterMarks *compbasemetrics.HistogramVec
|
||||
}
|
||||
|
||||
var _ TimedObserverGenerator = (*sampleAndWaterMarkObserverGenerator)(nil)
|
||||
|
||||
// NewSampleAndWaterMarkHistogramsGenerator makes a new one
|
||||
func NewSampleAndWaterMarkHistogramsGenerator(clock clock.PassiveClock, samplePeriod time.Duration, sampleOpts, waterMarkOpts *compbasemetrics.HistogramOpts, labelNames []string) SampleAndWaterMarkObserverGenerator {
|
||||
return SampleAndWaterMarkObserverGenerator{
|
||||
&sampleAndWaterMarkObserverGenerator{
|
||||
clock: clock,
|
||||
t0: clock.Now(),
|
||||
samplePeriod: samplePeriod,
|
||||
samples: compbasemetrics.NewHistogramVec(sampleOpts, labelNames),
|
||||
waterMarks: compbasemetrics.NewHistogramVec(waterMarkOpts, append([]string{labelNameMark}, labelNames...)),
|
||||
}}
|
||||
}
|
||||
|
||||
func (swg *sampleAndWaterMarkObserverGenerator) quantize(when time.Time) int64 {
|
||||
return int64(when.Sub(swg.t0) / swg.samplePeriod)
|
||||
}
|
||||
|
||||
// Generate makes a new TimedObserver
|
||||
func (swg *sampleAndWaterMarkObserverGenerator) Generate(x, x1 float64, labelValues []string) TimedObserver {
|
||||
relX := x / x1
|
||||
when := swg.clock.Now()
|
||||
return &sampleAndWaterMarkHistograms{
|
||||
sampleAndWaterMarkObserverGenerator: swg,
|
||||
labelValues: labelValues,
|
||||
loLabelValues: append([]string{labelValueLo}, labelValues...),
|
||||
hiLabelValues: append([]string{labelValueHi}, labelValues...),
|
||||
x1: x1,
|
||||
sampleAndWaterMarkAccumulator: sampleAndWaterMarkAccumulator{
|
||||
lastSet: when,
|
||||
lastSetInt: swg.quantize(when),
|
||||
x: x,
|
||||
relX: relX,
|
||||
loRelX: relX,
|
||||
hiRelX: relX,
|
||||
}}
|
||||
}
|
||||
|
||||
func (swg *sampleAndWaterMarkObserverGenerator) metrics() Registerables {
|
||||
return Registerables{swg.samples, swg.waterMarks}
|
||||
}
|
||||
|
||||
type sampleAndWaterMarkHistograms struct {
|
||||
*sampleAndWaterMarkObserverGenerator
|
||||
labelValues []string
|
||||
loLabelValues, hiLabelValues []string
|
||||
|
||||
sync.Mutex
|
||||
x1 float64
|
||||
sampleAndWaterMarkAccumulator
|
||||
}
|
||||
|
||||
type sampleAndWaterMarkAccumulator struct {
|
||||
lastSet time.Time
|
||||
lastSetInt int64 // lastSet / samplePeriod
|
||||
x float64
|
||||
relX float64 // x / x1
|
||||
loRelX, hiRelX float64
|
||||
}
|
||||
|
||||
var _ TimedObserver = (*sampleAndWaterMarkHistograms)(nil)
|
||||
|
||||
func (saw *sampleAndWaterMarkHistograms) Add(deltaX float64) {
|
||||
saw.innerSet(func() {
|
||||
saw.x += deltaX
|
||||
})
|
||||
}
|
||||
|
||||
func (saw *sampleAndWaterMarkHistograms) Set(x float64) {
|
||||
saw.innerSet(func() {
|
||||
saw.x = x
|
||||
})
|
||||
}
|
||||
|
||||
func (saw *sampleAndWaterMarkHistograms) SetX1(x1 float64) {
|
||||
saw.innerSet(func() {
|
||||
saw.x1 = x1
|
||||
})
|
||||
}
|
||||
|
||||
func (saw *sampleAndWaterMarkHistograms) innerSet(updateXOrX1 func()) {
|
||||
when, whenInt, acc, wellOrdered := func() (time.Time, int64, sampleAndWaterMarkAccumulator, bool) {
|
||||
saw.Lock()
|
||||
defer saw.Unlock()
|
||||
// Moved these variables here to tiptoe around https://github.com/golang/go/issues/43570 for #97685
|
||||
when := saw.clock.Now()
|
||||
whenInt := saw.quantize(when)
|
||||
acc := saw.sampleAndWaterMarkAccumulator
|
||||
wellOrdered := !when.Before(acc.lastSet)
|
||||
updateXOrX1()
|
||||
saw.relX = saw.x / saw.x1
|
||||
if wellOrdered {
|
||||
if acc.lastSetInt < whenInt {
|
||||
saw.loRelX, saw.hiRelX = acc.relX, acc.relX
|
||||
saw.lastSetInt = whenInt
|
||||
}
|
||||
saw.lastSet = when
|
||||
}
|
||||
// `wellOrdered` should always be true because we are using
|
||||
// monotonic clock readings and they never go backwards. Yet
|
||||
// very small backwards steps (under 1 microsecond) have been
|
||||
// observed
|
||||
// (https://github.com/kubernetes/kubernetes/issues/96459).
|
||||
// In the backwards case, treat the current reading as if it
|
||||
// had occurred at time `saw.lastSet` and log an error. It
|
||||
// would be wrong to update `saw.lastSet` in this case because
|
||||
// that plants a time bomb for future updates to
|
||||
// `saw.lastSetInt`.
|
||||
if saw.relX < saw.loRelX {
|
||||
saw.loRelX = saw.relX
|
||||
} else if saw.relX > saw.hiRelX {
|
||||
saw.hiRelX = saw.relX
|
||||
}
|
||||
return when, whenInt, acc, wellOrdered
|
||||
}()
|
||||
if !wellOrdered {
|
||||
lastSetS := acc.lastSet.String()
|
||||
whenS := when.String()
|
||||
klog.Errorf("Time went backwards from %s to %s for labelValues=%#+v", lastSetS, whenS, saw.labelValues)
|
||||
}
|
||||
for acc.lastSetInt < whenInt {
|
||||
saw.samples.WithLabelValues(saw.labelValues...).Observe(acc.relX)
|
||||
saw.waterMarks.WithLabelValues(saw.loLabelValues...).Observe(acc.loRelX)
|
||||
saw.waterMarks.WithLabelValues(saw.hiLabelValues...).Observe(acc.hiRelX)
|
||||
acc.lastSetInt++
|
||||
acc.loRelX, acc.hiRelX = acc.relX, acc.relX
|
||||
}
|
||||
}
|
||||
52
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go
generated
vendored
52
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timed_observer.go
generated
vendored
@@ -1,52 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
// TimedObserver gets informed about the values assigned to a variable
// `X float64` over time, and reports on the ratio `X/X1`.
type TimedObserver interface {
	// Add notes a change to the variable by the given delta.
	Add(deltaX float64)

	// Set notes a setting of the variable to the given value.
	Set(x float64)

	// SetX1 changes the value to use for X1 (the ratio's denominator).
	SetX1(x1 float64)
}
|
||||
|
||||
// TimedObserverGenerator creates related observers that are
// differentiated by a series of label values.
type TimedObserverGenerator interface {
	// Generate returns a TimedObserver for the given label values,
	// initialized with the given X and X1.
	Generate(x, x1 float64, labelValues []string) TimedObserver
}
|
||||
|
||||
// TimedObserverPair is a corresponding pair of observers, one for the
// number of requests waiting in queue(s) and one for the number of
// requests being executed.
type TimedObserverPair struct {
	// RequestsWaiting is given observations of the number of currently queued requests
	RequestsWaiting TimedObserver

	// RequestsExecuting is given observations of the number of requests currently executing
	RequestsExecuting TimedObserver
}
|
||||
|
||||
// TimedObserverPairGenerator generates TimedObserverPairs that are
// differentiated by a series of label values.
type TimedObserverPairGenerator interface {
	// Generate returns a pair whose members are initialized with the
	// given waiting and executing denominators (X1 values).
	Generate(waiting1, executing1 float64, labelValues []string) TimedObserverPair
}
|
||||
225
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timing_ratio_histogram.go
generated
vendored
Normal file
225
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/timing_ratio_histogram.go
generated
vendored
Normal file
@@ -0,0 +1,225 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// TimingRatioHistogram is essentially a gauge for a ratio where the client
// independently controls the numerator and denominator.
// When scraped it produces a histogram of samples of the ratio
// taken at the end of every nanosecond.
// `*TimingRatioHistogram` implements both Registerable and RatioedGauge.
type TimingRatioHistogram struct {
	// The implementation is layered on TimingHistogram,
	// adding the division by an occasionally adjusted denominator.

	// Registerable is the registerable aspect.
	// That is the registerable aspect of the underlying TimingHistogram.
	compbasemetrics.Registerable

	// timingRatioHistogramInner implements the RatioedGauge aspect.
	timingRatioHistogramInner
}
|
||||
|
||||
// TimingRatioHistogramOpts is the constructor parameters of a TimingRatioHistogram.
// The `TimingHistogramOpts.InitialValue` is the initial numerator.
type TimingRatioHistogramOpts struct {
	compbasemetrics.TimingHistogramOpts
	// InitialDenominator is the denominator to use until
	// SetDenominator is called.
	InitialDenominator float64
}
|
||||
|
||||
// timingRatioHistogramInner implements the instrumentation aspect:
// it holds the current numerator and denominator and pushes their
// quotient into an underlying ratio gauge on every change.
type timingRatioHistogramInner struct {
	// nowFunc supplies the current time; injectable for testing.
	nowFunc func() time.Time
	// getGaugeOfRatio returns the gauge that receives the computed ratio.
	// It is a func so the "inefficient" vec case can re-fetch the member
	// on every call.
	getGaugeOfRatio func() Gauge
	sync.Mutex
	// access only with mutex locked
	numerator, denominator float64
}
|
||||
|
||||
// Compile-time assertions that the concrete types satisfy the
// interfaces they are meant to implement.
var _ RatioedGauge = &timingRatioHistogramInner{}
var _ RatioedGauge = &TimingRatioHistogram{}
var _ compbasemetrics.Registerable = &TimingRatioHistogram{}
|
||||
|
||||
// NewTimingRatioHistogram returns an object which is TimingHistogram-like. However, nothing
// will be measured until the histogram is registered in at least one registry.
// It uses the real (wall) clock; see NewTestableTimingRatioHistogram for tests.
func NewTimingRatioHistogram(opts *TimingRatioHistogramOpts) *TimingRatioHistogram {
	return NewTestableTimingRatioHistogram(time.Now, opts)
}
|
||||
|
||||
// NewTestableTimingRatioHistogram is like NewTimingRatioHistogram but
// adds injection of the clock, for testing.
func NewTestableTimingRatioHistogram(nowFunc func() time.Time, opts *TimingRatioHistogramOpts) *TimingRatioHistogram {
	// The underlying TimingHistogram tracks the ratio, so convert the
	// initial numerator into an initial ratio.
	// NOTE(review): divides by opts.InitialDenominator — presumably callers
	// guarantee it is non-zero; confirm.
	ratioedOpts := opts.TimingHistogramOpts
	ratioedOpts.InitialValue /= opts.InitialDenominator
	th := compbasemetrics.NewTestableTimingHistogram(nowFunc, &ratioedOpts)
	return &TimingRatioHistogram{
		Registerable: th,
		timingRatioHistogramInner: timingRatioHistogramInner{
			nowFunc:         nowFunc,
			getGaugeOfRatio: func() Gauge { return th },
			numerator:       opts.InitialValue,
			denominator:     opts.InitialDenominator,
		}}
}
|
||||
|
||||
func (trh *timingRatioHistogramInner) Set(numerator float64) {
|
||||
trh.Lock()
|
||||
defer trh.Unlock()
|
||||
trh.numerator = numerator
|
||||
ratio := numerator / trh.denominator
|
||||
trh.getGaugeOfRatio().Set(ratio)
|
||||
}
|
||||
|
||||
func (trh *timingRatioHistogramInner) Add(deltaNumerator float64) {
|
||||
trh.Lock()
|
||||
defer trh.Unlock()
|
||||
numerator := trh.numerator + deltaNumerator
|
||||
trh.numerator = numerator
|
||||
ratio := numerator / trh.denominator
|
||||
trh.getGaugeOfRatio().Set(ratio)
|
||||
}
|
||||
|
||||
// Sub decreases the numerator by the given amount.
func (trh *timingRatioHistogramInner) Sub(deltaNumerator float64) {
	trh.Add(-deltaNumerator)
}
|
||||
|
||||
// Inc increases the numerator by one.
func (trh *timingRatioHistogramInner) Inc() {
	trh.Add(1)
}
|
||||
|
||||
// Dec decreases the numerator by one.
func (trh *timingRatioHistogramInner) Dec() {
	trh.Add(-1)
}
|
||||
|
||||
// SetToCurrentTime sets the numerator to the duration since the Unix
// epoch, expressed as a float64 count of nanoseconds (time.Duration's
// underlying unit).
func (trh *timingRatioHistogramInner) SetToCurrentTime() {
	trh.Set(float64(trh.nowFunc().Sub(time.Unix(0, 0))))
}
|
||||
|
||||
func (trh *timingRatioHistogramInner) SetDenominator(denominator float64) {
|
||||
trh.Lock()
|
||||
defer trh.Unlock()
|
||||
trh.denominator = denominator
|
||||
ratio := trh.numerator / denominator
|
||||
trh.getGaugeOfRatio().Set(ratio)
|
||||
}
|
||||
|
||||
// WithContext allows the normal TimingHistogram metric to pass in context.
// The context is no-op at the current level of development; the receiver
// itself is returned unchanged.
func (trh *timingRatioHistogramInner) WithContext(ctx context.Context) RatioedGauge {
	return trh
}
|
||||
|
||||
// TimingRatioHistogramVec is a collection of TimingRatioHistograms that differ
// only in label values.
// `*TimingRatioHistogramVec` implements both Registerable and RatioedGaugeVec.
type TimingRatioHistogramVec struct {
	// promote only the Registerable methods
	compbasemetrics.Registerable
	// delegate is TimingHistograms of the ratio
	delegate compbasemetrics.GaugeVecMetric
}
|
||||
|
||||
// Compile-time assertions for the vector type.
var _ RatioedGaugeVec = &TimingRatioHistogramVec{}
var _ compbasemetrics.Registerable = &TimingRatioHistogramVec{}
|
||||
|
||||
// NewTimingRatioHistogramVec constructs a new vector.
// `opts.InitialValue` is the initial ratio, but this applies
// only for the tiny period of time until NewForLabelValuesSafe sets
// the ratio based on the given initial numerator and denominator.
// Thus there is a tiny splinter of time during member construction when
// its underlying TimingHistogram is given the initial numerator rather than
// the initial ratio (which is obviously a non-issue when both are zero).
// Note the difficulties associated with extracting a member
// before registering the vector.
func NewTimingRatioHistogramVec(opts *compbasemetrics.TimingHistogramOpts, labelNames ...string) *TimingRatioHistogramVec {
	return NewTestableTimingRatioHistogramVec(time.Now, opts, labelNames...)
}
|
||||
|
||||
// NewTestableTimingRatioHistogramVec is like NewTimingRatioHistogramVec
// but adds injection of the clock, for testing.
func NewTestableTimingRatioHistogramVec(nowFunc func() time.Time, opts *compbasemetrics.TimingHistogramOpts, labelNames ...string) *TimingRatioHistogramVec {
	delegate := compbasemetrics.NewTestableTimingHistogramVec(nowFunc, opts, labelNames)
	return &TimingRatioHistogramVec{
		// The delegate serves both aspects: registration and metric storage.
		Registerable: delegate,
		delegate:     delegate,
	}
}
|
||||
|
||||
// metrics returns this vector as a one-element list of Registerables.
func (v *TimingRatioHistogramVec) metrics() Registerables {
	return Registerables{v}
}
|
||||
|
||||
// NewForLabelValuesChecked will return an error if this vec is not hidden and not yet registered
|
||||
// or there is a syntactic problem with the labelValues.
|
||||
func (v *TimingRatioHistogramVec) NewForLabelValuesChecked(initialNumerator, initialDenominator float64, labelValues []string) (RatioedGauge, error) {
|
||||
underMember, err := v.delegate.WithLabelValuesChecked(labelValues...)
|
||||
if err != nil {
|
||||
return noopRatioed{}, err
|
||||
}
|
||||
underMember.Set(initialNumerator / initialDenominator)
|
||||
return &timingRatioHistogramInner{
|
||||
getGaugeOfRatio: func() Gauge { return underMember },
|
||||
numerator: initialNumerator,
|
||||
denominator: initialDenominator,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewForLabelValuesSafe is the same as NewForLabelValuesChecked in cases where that does not
// return an error. When the unsafe version returns an error due to the vector not being
// registered yet, the safe version returns an object that implements its methods
// by looking up the relevant vector member in each call (thus getting a non-noop after registration).
// In the other error cases the object returned here is a noop.
func (v *TimingRatioHistogramVec) NewForLabelValuesSafe(initialNumerator, initialDenominator float64, labelValues []string) RatioedGauge {
	tro, err := v.NewForLabelValuesChecked(initialNumerator, initialDenominator, labelValues)
	if err == nil {
		klog.V(3).InfoS("TimingRatioHistogramVec.NewForLabelValuesSafe hit the efficient case", "fqName", v.FQName(), "labelValues", labelValues)
		return tro
	}
	if !compbasemetrics.ErrIsNotRegistered(err) {
		// Not the "not yet registered" case, so the labelValues are bad;
		// the permanent noop returned by the checked call is the right answer.
		klog.ErrorS(err, "Failed to extract TimingRatioHistogramVec member, using noop instead", "vectorname", v.FQName(), "labelValues", labelValues)
		return tro
	}
	klog.V(3).InfoS("TimingRatioHistogramVec.NewForLabelValuesSafe hit the inefficient case", "fqName", v.FQName(), "labelValues", labelValues)
	// At this point we know v.NewForLabelValuesChecked(..) returns a permanent noop,
	// which we precisely want to avoid using. Instead, make our own gauge that
	// fetches the element on every Set.
	return &timingRatioHistogramInner{
		getGaugeOfRatio: func() Gauge { return v.delegate.WithLabelValues(labelValues...) },
		numerator:       initialNumerator,
		denominator:     initialDenominator,
	}
}
|
||||
|
||||
// noopRatioed is a RatioedGauge that discards every observation.
// It is handed out when a vector member cannot be extracted.
type noopRatioed struct{}

func (noopRatioed) Set(float64)            {}
func (noopRatioed) Add(float64)            {}
func (noopRatioed) Sub(float64)            {}
func (noopRatioed) Inc()                   {}
func (noopRatioed) Dec()                   {}
func (noopRatioed) SetToCurrentTime()      {}
func (noopRatioed) SetDenominator(float64) {}
|
||||
|
||||
// Reset removes all the members from this vector by resetting the delegate.
func (v *TimingRatioHistogramVec) Reset() {
	v.delegate.Reset()
}
|
||||
25
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/vec_element_pair.go
generated
vendored
Normal file
25
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/vec_element_pair.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
// RatioedGaugeVecPhasedElementPair extracts a pair of elements that differ in handling phase
|
||||
func RatioedGaugeVecPhasedElementPair(vec RatioedGaugeVec, initialWaitingDenominator, initialExecutingDenominator float64, labelValues []string) RatioedGaugePair {
|
||||
return RatioedGaugePair{
|
||||
RequestsWaiting: vec.NewForLabelValuesSafe(0, initialWaitingDenominator, append([]string{LabelValueWaiting}, labelValues...)),
|
||||
RequestsExecuting: vec.NewForLabelValuesSafe(0, initialExecutingDenominator, append([]string{LabelValueExecuting}, labelValues...)),
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user