Bump sigs.k8s.io/controller-runtime to v0.14.4 (#5507)

* Bump sigs.k8s.io/controller-runtime to v0.14.4
* Update gofmt

vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go | 2 changes (generated, vendored)
@@ -19,7 +19,7 @@ package apihelpers
 import (
 	"sort"
 
-	flowcontrol "k8s.io/api/flowcontrol/v1beta2"
+	flowcontrol "k8s.io/api/flowcontrol/v1beta3"
 )
 
 // SetFlowSchemaCondition sets conditions.

vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go | 294 changes (generated, vendored)
@@ -20,7 +20,6 @@ import (
 	"context"
 	"crypto/sha256"
 	"encoding/binary"
-	"encoding/json"
 	"errors"
 	"fmt"
 	"math"
@@ -34,7 +33,6 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	apitypes "k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -52,9 +50,10 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/utils/clock"
 
-	flowcontrol "k8s.io/api/flowcontrol/v1beta2"
-	flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
-	flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1beta2"
+	flowcontrol "k8s.io/api/flowcontrol/v1beta3"
+	flowcontrolapplyconfiguration "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
+	flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3"
+	flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1beta3"
 )
 
 const timeFmt = "2006-01-02T15:04:05.999"
@@ -68,6 +67,22 @@ const timeFmt = "2006-01-02T15:04:05.999"
 // undesired becomes completely unused, all the config objects are
 // read and processed as a whole.
 
+const (
+	// Borrowing among priority levels will be accomplished by periodically
+	// adjusting the current concurrency limits (CurrentCLs);
+	// borrowingAdjustmentPeriod is that period.
+	borrowingAdjustmentPeriod = 10 * time.Second
+
+	// The input to the seat borrowing is smoothed seat demand figures.
+	// This constant controls the decay rate of that smoothing,
+	// as described in the comment on the `seatDemandStats` field of `priorityLevelState`.
+	// The particular number appearing here has the property that half-life
+	// of that decay is 5 minutes.
+	// This is a very preliminary guess at a good value and is likely to be tweaked
+	// once we get some experience with borrowing.
+	seatDemandSmoothingCoefficient = 0.977
+)
+
 // The funcs in this package follow the naming convention that the suffix
 // "Locked" means the relevant mutex must be locked at the start of each
 // call and will be locked upon return.  For a configController, the
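
As a sanity check on the constant above: with one smoothing update per borrowingAdjustmentPeriod (10s), a per-update decay factor of 0.977 halves an old observation's weight after roughly 5 minutes, since 0.977^30 ≈ 0.5. A standalone sketch (not part of the vendored code) deriving the coefficient from a desired half-life:

package main

import (
	"fmt"
	"math"
)

func main() {
	period := 10.0    // seconds between adjustments (borrowingAdjustmentPeriod)
	halfLife := 300.0 // desired half-life of the smoothing, in seconds
	// After halfLife/period updates the retained weight should be 1/2,
	// so coefficient^(halfLife/period) = 0.5.
	coefficient := math.Pow(0.5, period/halfLife)
	fmt.Printf("coefficient ≈ %.3f\n", coefficient) // prints 0.977
}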

@@ -123,7 +138,7 @@ type configController struct {
 	fsLister         flowcontrollister.FlowSchemaLister
 	fsInformerSynced cache.InformerSynced
 
-	flowcontrolClient flowcontrolclient.FlowcontrolV1beta2Interface
+	flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface
 
 	// serverConcurrencyLimit is the limit on the server's total
 	// number of non-exempt requests being served at once.  This comes
@@ -162,6 +177,12 @@ type configController struct {
 	// name to the state for that level.  Every name referenced from a
 	// member of `flowSchemas` has an entry here.
 	priorityLevelStates map[string]*priorityLevelState
+
+	// nominalCLSum is the sum of the nominalCL fields in the priorityLevelState records.
+	// This can exceed serverConcurrencyLimit because of the deliberate rounding up
+	// in the computation of the nominalCL values.
+	// This is tracked because it is an input to the allocation adjustment algorithm.
+	nominalCLSum int
 }
 
 type updateAttempt struct {
@@ -197,6 +218,46 @@ type priorityLevelState struct {
 
 	// Observer of number of seats occupied throughout execution
 	execSeatsObs metrics.RatioedGauge
+
+	// Integrator of seat demand, reset every CurrentCL adjustment period
+	seatDemandIntegrator fq.Integrator
+
+	// Gauge of seat demand / nominalCL
+	seatDemandRatioedGauge metrics.RatioedGauge
+
+	// seatDemandStats is derived from periodically examining the seatDemandIntegrator.
+	// The average, standard deviation, and high watermark come directly from the integrator.
+	// envelope = avg + stdDev.
+	// Periodically smoothed gets replaced with `max(envelope, A*smoothed + (1-A)*envelope)`,
+	// where A is seatDemandSmoothingCoefficient.
+	seatDemandStats seatDemandStats
+
+	// nominalCL is the nominal concurrency limit configured in the PriorityLevelConfiguration
+	nominalCL int
+
+	// minCL is the nominal limit less the lendable amount
+	minCL int
+
+	//maxCL is the nominal limit plus the amount that may be borrowed
+	maxCL int
+
+	// currentCL is the dynamically derived concurrency limit to impose for now
+	currentCL int
 }
 
+type seatDemandStats struct {
+	avg           float64
+	stdDev        float64
+	highWatermark float64
+	smoothed      float64
+}
+
+func (stats *seatDemandStats) update(obs fq.IntegratorResults) {
+	stats.avg = obs.Average
+	stats.stdDev = obs.Deviation
+	stats.highWatermark = obs.Max
+	envelope := obs.Average + obs.Deviation
+	stats.smoothed = math.Max(envelope, seatDemandSmoothingCoefficient*stats.smoothed+(1-seatDemandSmoothingCoefficient)*envelope)
+}
+
 // NewTestableController is extra flexible to facilitate testing
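
The `update` method above is a one-sided exponential smoothing: `smoothed` jumps up immediately when the demand envelope exceeds it, but decays only slowly afterwards. A standalone sketch of that asymmetry (assumed envelope values, not the vendored types):

package main

import (
	"fmt"
	"math"
)

const a = 0.977 // seatDemandSmoothingCoefficient

func main() {
	smoothed := 0.0
	// A burst of demand (envelope 100) followed by quiet periods (envelope 10):
	for i, envelope := range []float64{100, 10, 10, 10} {
		// Upward moves are taken immediately; downward moves decay slowly.
		smoothed = math.Max(envelope, a*smoothed+(1-a)*envelope)
		fmt.Printf("period %d: envelope=%5.1f smoothed=%6.2f\n", i, envelope, smoothed)
	}
}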

@@ -221,7 +282,7 @@ func newTestableController(config TestableConfig) *configController {
 	cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue")
 	// ensure the data structure reflects the mandatory config
 	cfgCtlr.lockAndDigestConfigObjects(nil, nil)
-	fci := config.InformerFactory.Flowcontrol().V1beta2()
+	fci := config.InformerFactory.Flowcontrol().V1beta3()
 	pli := fci.PriorityLevelConfigurations()
 	fsi := fci.FlowSchemas()
 	cfgCtlr.plLister = pli.Lister()
@@ -305,11 +366,87 @@ func (cfgCtlr *configController) Run(stopCh <-chan struct{}) error {
 	klog.Info("Running API Priority and Fairness config worker")
 	go wait.Until(cfgCtlr.runWorker, time.Second, stopCh)
 
+	klog.Info("Running API Priority and Fairness periodic rebalancing process")
+	go wait.Until(cfgCtlr.updateBorrowing, borrowingAdjustmentPeriod, stopCh)
+
 	<-stopCh
 	klog.Info("Shutting down API Priority and Fairness config worker")
 	return nil
 }
+
+func (cfgCtlr *configController) updateBorrowing() {
+	cfgCtlr.lock.Lock()
+	defer cfgCtlr.lock.Unlock()
+	cfgCtlr.updateBorrowingLocked(true, cfgCtlr.priorityLevelStates)
+}
+
+func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plStates map[string]*priorityLevelState) {
+	items := make([]allocProblemItem, 0, len(plStates))
+	plNames := make([]string, 0, len(plStates))
+	for plName, plState := range plStates {
+		if plState.pl.Spec.Limited == nil {
+			continue
+		}
+		obs := plState.seatDemandIntegrator.Reset()
+		plState.seatDemandStats.update(obs)
+		// Lower bound on this priority level's adjusted concurreny limit is the lesser of:
+		// - its seat demamd high watermark over the last adjustment period, and
+		// - its configured concurrency limit.
+		// BUT: we do not want this to be lower than the lower bound from configuration.
+		// See KEP-1040 for a more detailed explanation.
+		minCurrentCL := math.Max(float64(plState.minCL), math.Min(float64(plState.nominalCL), plState.seatDemandStats.highWatermark))
+		plNames = append(plNames, plName)
+		items = append(items, allocProblemItem{
+			lowerBound: minCurrentCL,
+			upperBound: float64(plState.maxCL),
+			target:     math.Max(minCurrentCL, plState.seatDemandStats.smoothed),
+		})
+	}
+	if len(items) == 0 && cfgCtlr.nominalCLSum > 0 {
+		klog.ErrorS(nil, "Impossible: no non-exempt priority levels", "plStates", cfgCtlr.priorityLevelStates)
+		return
+	}
+	allocs, fairFrac, err := computeConcurrencyAllocation(cfgCtlr.nominalCLSum, items)
+	if err != nil {
+		klog.ErrorS(err, "Unable to derive new concurrency limits", "plNames", plNames, "items", items)
+		allocs = make([]float64, len(items))
+		for idx, plName := range plNames {
+			plState := plStates[plName]
+			if plState.pl.Spec.Limited == nil {
+				continue
+			}
+			allocs[idx] = float64(plState.currentCL)
+		}
+	}
+	for idx, plName := range plNames {
+		plState := plStates[plName]
+		if plState.pl.Spec.Limited == nil {
+			continue
+		}
+		if setCompleters {
+			qsCompleter, err := queueSetCompleterForPL(cfgCtlr.queueSetFactory, plState.queues,
+				plState.pl, cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs,
+				metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge))
+			if err != nil {
+				klog.ErrorS(err, "Inconceivable!  Configuration error in existing priority level", "pl", plState.pl)
+				continue
+			}
+			plState.qsCompleter = qsCompleter
+		}
+		currentCL := int(math.Round(float64(allocs[idx])))
+		relChange := relDiff(float64(currentCL), float64(plState.currentCL))
+		plState.currentCL = currentCL
+		metrics.NotePriorityLevelConcurrencyAdjustment(plState.pl.Name, plState.seatDemandStats.highWatermark, plState.seatDemandStats.avg, plState.seatDemandStats.stdDev, plState.seatDemandStats.smoothed, float64(items[idx].target), currentCL)
+		logLevel := klog.Level(4)
+		if relChange >= 0.05 {
+			logLevel = 2
+		}
+		klog.V(logLevel).InfoS("Update CurrentCL", "plName", plName, "seatDemandHighWatermark", plState.seatDemandStats.highWatermark, "seatDemandAvg", plState.seatDemandStats.avg, "seatDemandStdev", plState.seatDemandStats.stdDev, "seatDemandSmoothed", plState.seatDemandStats.smoothed, "fairFrac", fairFrac, "currentCL", currentCL, "backstop", err != nil)
+		plState.queues = plState.qsCompleter.Complete(fq.DispatchingConfig{ConcurrencyLimit: currentCL})
+	}
+	metrics.SetFairFrac(float64(fairFrac))
+}
 
 // runWorker is the logic of the one and only worker goroutine.  We
 // limit the number to one in order to obviate explicit
 // synchronization around access to `cfgCtlr.mostRecentUpdates`.
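
In updateBorrowingLocked above, each level's allocation request is derived from its recent demand. A standalone numeric sketch of the bound/target derivation (assumed stats, not the vendored types):

package main

import (
	"fmt"
	"math"
)

func main() {
	// Assumed state for one priority level:
	minCL, nominalCL, maxCL := 150.0, 300.0, 360.0
	highWatermark := 120.0 // peak seat demand last period
	smoothed := 210.0      // smoothed demand envelope

	// Demand fell below nominal, but configuration still guarantees minCL seats.
	lowerBound := math.Max(minCL, math.Min(nominalCL, highWatermark))
	target := math.Max(lowerBound, smoothed)
	upperBound := maxCL

	fmt.Println(lowerBound, target, upperBound) // 150 210 360
}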

@@ -420,19 +557,12 @@ func (cfgCtlr *configController) digestConfigObjects(newPLs []*flowcontrol.Prior
 
 		// if we are going to issue an update, be sure we track every name we update so we know if we update it too often.
 		currResult.updatedItems.Insert(fsu.flowSchema.Name)
-		patchBytes, err := makeFlowSchemaConditionPatch(fsu.condition)
-		if err != nil {
-			// should never happen because these conditions are created here and well formed
-			panic(fmt.Sprintf("Failed to json.Marshall(%#+v): %s", fsu.condition, err.Error()))
-		}
 		if klogV := klog.V(4); klogV.Enabled() {
 			klogV.Infof("%s writing Condition %s to FlowSchema %s, which had ResourceVersion=%s, because its previous value was %s, diff: %s",
 				cfgCtlr.name, fsu.condition, fsu.flowSchema.Name, fsu.flowSchema.ResourceVersion, fcfmt.Fmt(fsu.oldValue), cmp.Diff(fsu.oldValue, fsu.condition))
 		}
-		fsIfc := cfgCtlr.flowcontrolClient.FlowSchemas()
-		patchOptions := metav1.PatchOptions{FieldManager: cfgCtlr.asFieldManager}
-		_, err = fsIfc.Patch(context.TODO(), fsu.flowSchema.Name, apitypes.StrategicMergePatchType, patchBytes, patchOptions, "status")
-		if err != nil {
+
+		if err := apply(cfgCtlr.flowcontrolClient.FlowSchemas(), fsu, cfgCtlr.asFieldManager); err != nil {
 			if apierrors.IsNotFound(err) {
 				// This object has been deleted.  A notification is coming
 				// and nothing more needs to be done here.
@@ -447,18 +577,27 @@
 	return suggestedDelay, utilerrors.NewAggregate(errs)
 }
 
-// makeFlowSchemaConditionPatch takes in a condition and returns the patch status as a json.
-func makeFlowSchemaConditionPatch(condition flowcontrol.FlowSchemaCondition) ([]byte, error) {
-	o := struct {
-		Status flowcontrol.FlowSchemaStatus `json:"status"`
-	}{
-		Status: flowcontrol.FlowSchemaStatus{
-			Conditions: []flowcontrol.FlowSchemaCondition{
-				condition,
-			},
-		},
-	}
-	return json.Marshal(o)
+func apply(client flowcontrolclient.FlowSchemaInterface, fsu fsStatusUpdate, asFieldManager string) error {
+	applyOptions := metav1.ApplyOptions{FieldManager: asFieldManager, Force: true}
+
+	// the condition field in fsStatusUpdate holds the new condition we want to update.
+	// TODO: this will break when we have multiple conditions for a flowschema
+	_, err := client.ApplyStatus(context.TODO(), toFlowSchemaApplyConfiguration(fsu), applyOptions)
+	return err
+}
+
+func toFlowSchemaApplyConfiguration(fsUpdate fsStatusUpdate) *flowcontrolapplyconfiguration.FlowSchemaApplyConfiguration {
+	condition := flowcontrolapplyconfiguration.FlowSchemaCondition().
+		WithType(fsUpdate.condition.Type).
+		WithStatus(fsUpdate.condition.Status).
+		WithReason(fsUpdate.condition.Reason).
+		WithLastTransitionTime(fsUpdate.condition.LastTransitionTime).
+		WithMessage(fsUpdate.condition.Message)
+
+	return flowcontrolapplyconfiguration.FlowSchema(fsUpdate.flowSchema.Name).
+		WithStatus(flowcontrolapplyconfiguration.FlowSchemaStatus().
+			WithConditions(condition),
+		)
 }
 
 // shouldDelayUpdate checks to see if a flowschema has been updated too often and returns true if a delay is needed.
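
The patch-based status writer is replaced above by server-side apply. For orientation, a minimal sketch of the same call shape against a cluster; the client wiring, field-manager name, and condition values here are illustrative assumptions, not part of this diff:

package flowcontroldemo

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolapply "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
	"k8s.io/client-go/kubernetes"
)

// markNotDangling applies a single status condition; Force lets this
// field manager take ownership of it even on conflict.
func markNotDangling(ctx context.Context, cs kubernetes.Interface, name string) error {
	cond := flowcontrolapply.FlowSchemaCondition().
		WithType("Dangling").
		WithStatus("False").
		WithReason("Found").
		WithMessage("bound to a priority level")
	fs := flowcontrolapply.FlowSchema(name).
		WithStatus(flowcontrolapply.FlowSchemaStatus().WithConditions(cond))
	_, err := cs.FlowcontrolV1beta3().FlowSchemas().ApplyStatus(ctx, fs,
		metav1.ApplyOptions{FieldManager: "my-controller", Force: true})
	return err
}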

@@ -530,9 +669,16 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi
 		state := meal.cfgCtlr.priorityLevelStates[pl.Name]
 		if state == nil {
 			labelValues := []string{pl.Name}
-			state = &priorityLevelState{reqsGaugePair: metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues), execSeatsObs: meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues)}
+			state = &priorityLevelState{
+				reqsGaugePair:          metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues),
+				execSeatsObs:           meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues),
+				seatDemandIntegrator:   fq.NewNamedIntegrator(meal.cfgCtlr.clock, pl.Name),
+				seatDemandRatioedGauge: metrics.ApiserverSeatDemands.NewForLabelValuesSafe(0, 1, []string{pl.Name}),
+			}
 		}
-		qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues, pl, meal.cfgCtlr.requestWaitLimit, state.reqsGaugePair, state.execSeatsObs)
+		qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues,
+			pl, meal.cfgCtlr.requestWaitLimit, state.reqsGaugePair, state.execSeatsObs,
+			metrics.NewUnionGauge(state.seatDemandIntegrator, state.seatDemandRatioedGauge))
 		if err != nil {
 			klog.Warningf("Ignoring PriorityLevelConfiguration object %s because its spec (%s) is broken: %s", pl.Name, fcfmt.Fmt(pl.Spec), err)
 			continue
@@ -545,7 +691,7 @@
 			state.quiescing = false
 		}
 		if state.pl.Spec.Limited != nil {
-			meal.shareSum += float64(state.pl.Spec.Limited.AssuredConcurrencyShares)
+			meal.shareSum += float64(state.pl.Spec.Limited.NominalConcurrencyShares)
 		}
 		meal.haveExemptPL = meal.haveExemptPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameExempt
 		meal.haveCatchAllPL = meal.haveCatchAllPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameCatchAll
@@ -636,7 +782,9 @@ func (meal *cfgMeal) processOldPLsLocked() {
 			}
 		}
 		var err error
-		plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues, plState.pl, meal.cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs)
+		plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues,
+			plState.pl, meal.cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs,
+			metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge))
 		if err != nil {
 			// This can not happen because queueSetCompleterForPL already approved this config
 			panic(fmt.Sprintf("%s from name=%q spec=%s", err, plName, fcfmt.Fmt(plState.pl.Spec)))
@@ -648,7 +796,7 @@
 			// priority level continues to get a concurrency
 			// allocation determined by all the share values in the
 			// regular way.
-			meal.shareSum += float64(plState.pl.Spec.Limited.AssuredConcurrencyShares)
+			meal.shareSum += float64(plState.pl.Spec.Limited.NominalConcurrencyShares)
 		}
 		meal.haveExemptPL = meal.haveExemptPL || plName == flowcontrol.PriorityLevelConfigurationNameExempt
 		meal.haveCatchAllPL = meal.haveCatchAllPL || plName == flowcontrol.PriorityLevelConfigurationNameCatchAll
@@ -666,32 +814,55 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() {
 			continue
 		}
 
+		limited := plState.pl.Spec.Limited
 		// The use of math.Ceil here means that the results might sum
 		// to a little more than serverConcurrencyLimit but the
 		// difference will be negligible.
-		concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(plState.pl.Spec.Limited.AssuredConcurrencyShares) / meal.shareSum))
-		metrics.UpdateSharedConcurrencyLimit(plName, concurrencyLimit)
+		concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(limited.NominalConcurrencyShares) / meal.shareSum))
+		var lendableCL, borrowingCL int
+		if limited.LendablePercent != nil {
+			lendableCL = int(math.Round(float64(concurrencyLimit) * float64(*limited.LendablePercent) / 100))
+		}
+		if limited.BorrowingLimitPercent != nil {
+			borrowingCL = int(math.Round(float64(concurrencyLimit) * float64(*limited.BorrowingLimitPercent) / 100))
+		} else {
+			borrowingCL = meal.cfgCtlr.serverConcurrencyLimit
+		}
+		metrics.SetPriorityLevelConfiguration(plName, concurrencyLimit, concurrencyLimit-lendableCL, concurrencyLimit+borrowingCL)
+		plState.seatDemandRatioedGauge.SetDenominator(float64(concurrencyLimit))
+		cfgChanged := plState.nominalCL != concurrencyLimit || plState.minCL != concurrencyLimit-lendableCL || plState.maxCL != concurrencyLimit+borrowingCL
+		plState.nominalCL = concurrencyLimit
+		plState.minCL = concurrencyLimit - lendableCL
+		plState.maxCL = concurrencyLimit + borrowingCL
 		meal.maxExecutingRequests += concurrencyLimit
 		var waitLimit int
-		if qCfg := plState.pl.Spec.Limited.LimitResponse.Queuing; qCfg != nil {
+		if qCfg := limited.LimitResponse.Queuing; qCfg != nil {
 			waitLimit = int(qCfg.Queues * qCfg.QueueLengthLimit)
 		}
 		meal.maxWaitingRequests += waitLimit
 
 		if plState.queues == nil {
-			klog.V(5).Infof("Introducing queues for priority level %q: config=%s, concurrencyLimit=%d, quiescing=%v (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, plState.quiescing, plState.pl.Spec.Limited.AssuredConcurrencyShares, meal.shareSum)
+			initialCL := concurrencyLimit - lendableCL/2
+			klog.V(2).Infof("Introducing queues for priority level %q: config=%s, nominalCL=%d, lendableCL=%d, borrowingCL=%d, currentCL=%d, quiescing=%v (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, lendableCL, borrowingCL, initialCL, plState.quiescing, plState.pl.Spec.Limited.NominalConcurrencyShares, meal.shareSum)
+			plState.seatDemandStats = seatDemandStats{}
+			plState.currentCL = initialCL
 		} else {
-			klog.V(5).Infof("Retaining queues for priority level %q: config=%s, concurrencyLimit=%d, quiescing=%v, numPending=%d (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, plState.quiescing, plState.numPending, plState.pl.Spec.Limited.AssuredConcurrencyShares, meal.shareSum)
+			logLevel := klog.Level(5)
+			if cfgChanged {
+				logLevel = 2
+			}
+			klog.V(logLevel).Infof("Retaining queues for priority level %q: config=%s, nominalCL=%d, lendableCL=%d, borrowingCL=%d, currentCL=%d, quiescing=%v, numPending=%d (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, lendableCL, borrowingCL, plState.currentCL, plState.quiescing, plState.numPending, plState.pl.Spec.Limited.NominalConcurrencyShares, meal.shareSum)
 		}
 		plState.queues = plState.qsCompleter.Complete(fq.DispatchingConfig{ConcurrencyLimit: concurrencyLimit})
 	}
+	meal.cfgCtlr.nominalCLSum = meal.maxExecutingRequests
+	meal.cfgCtlr.updateBorrowingLocked(false, meal.newPLStates)
 }
 
 // queueSetCompleterForPL returns an appropriate QueueSetCompleter for the
 // given priority level configuration.  Returns nil if that config
 // does not call for limiting.  Returns nil and an error if the given
 // object is malformed in a way that is a problem for this package.
-func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge) (fq.QueueSetCompleter, error) {
+func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) {
 	if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt) != (pl.Spec.Limited == nil) {
 		return nil, errors.New("broken union structure at the top")
 	}
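
Concretely, the nominal/min/max arithmetic above works like this; a standalone sketch with assumed numbers (serverConcurrencyLimit 600, two levels with 30 shares each, 50% lendable, a 20% borrowing limit):

package main

import (
	"fmt"
	"math"
)

func main() {
	serverConcurrencyLimit := 600.0
	shares, shareSum := 30.0, 60.0
	lendablePercent := 50.0  // assumed spec.limited.lendablePercent
	borrowingPercent := 20.0 // assumed spec.limited.borrowingLimitPercent

	nominalCL := math.Ceil(serverConcurrencyLimit * shares / shareSum) // 300
	lendableCL := math.Round(nominalCL * lendablePercent / 100)        // 150
	borrowingCL := math.Round(nominalCL * borrowingPercent / 100)      // 60

	minCL := nominalCL - lendableCL  // seats this level is guaranteed to keep
	maxCL := nominalCL + borrowingCL // hard cap including borrowed seats
	initialCL := nominalCL - lendableCL/2

	fmt.Println(nominalCL, minCL, maxCL, initialCL) // 300 150 360 225
}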

@@ -720,7 +891,7 @@ func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flow
 	if queues != nil {
 		qsc, err = queues.BeginConfigChange(qcQS)
 	} else {
-		qsc, err = qsf.BeginConstruction(qcQS, reqsIntPair, execSeatsObs)
+		qsc, err = qsf.BeginConstruction(qcQS, reqsIntPair, execSeatsObs, seatDemandGauge)
 	}
 	if err != nil {
 		err = fmt.Errorf("priority level %q has QueuingConfiguration %#+v, which is invalid: %w", pl.Name, qcAPI, err)
@@ -768,20 +939,26 @@ func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, re
 	labelValues := []string{proto.Name}
 	reqsGaugePair := metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues)
 	execSeatsObs := meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues)
-	qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, requestWaitLimit, reqsGaugePair, execSeatsObs)
+	seatDemandIntegrator := fq.NewNamedIntegrator(meal.cfgCtlr.clock, proto.Name)
+	seatDemandRatioedGauge := metrics.ApiserverSeatDemands.NewForLabelValuesSafe(0, 1, []string{proto.Name})
+	qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto,
+		requestWaitLimit, reqsGaugePair, execSeatsObs,
+		metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge))
 	if err != nil {
 		// This can not happen because proto is one of the mandatory
 		// objects and these are not erroneous
 		panic(err)
 	}
 	meal.newPLStates[proto.Name] = &priorityLevelState{
-		pl:            proto,
-		qsCompleter:   qsCompleter,
-		reqsGaugePair: reqsGaugePair,
-		execSeatsObs:  execSeatsObs,
+		pl:                     proto,
+		qsCompleter:            qsCompleter,
+		reqsGaugePair:          reqsGaugePair,
+		execSeatsObs:           execSeatsObs,
+		seatDemandIntegrator:   seatDemandIntegrator,
+		seatDemandRatioedGauge: seatDemandRatioedGauge,
 	}
 	if proto.Spec.Limited != nil {
-		meal.shareSum += float64(proto.Spec.Limited.AssuredConcurrencyShares)
+		meal.shareSum += float64(proto.Spec.Limited.NominalConcurrencyShares)
 	}
 }
@@ -869,12 +1046,14 @@ func (cfgCtlr *configController) maybeReap(plName string) {
 		klog.V(7).Infof("plName=%s, plState==nil", plName)
 		return
 	}
-	if plState.queues != nil {
-		useless := plState.quiescing && plState.numPending == 0 && plState.queues.IsIdle()
-		klog.V(7).Infof("plState.quiescing=%v, plState.numPending=%d, useless=%v", plState.quiescing, plState.numPending, useless)
-		if !useless {
-			return
-		}
+	if plState.queues == nil {
+		klog.V(7).Infof("plName=%s, plState.queues==nil", plName)
+		return
+	}
+	useless := plState.quiescing && plState.numPending == 0 && plState.queues.IsIdle()
+	klog.V(7).Infof("plState.quiescing=%v, plState.numPending=%d, useless=%v", plState.quiescing, plState.numPending, useless)
+	if !useless {
+		return
 	}
 	klog.V(3).Infof("Triggered API priority and fairness config reloading because priority level %s is undesired and idle", plName)
 	cfgCtlr.configQueue.Add(0)
@@ -919,3 +1098,12 @@ func hashFlowID(fsName, fDistinguisher string) uint64 {
 	hash.Sum(sum[:0])
 	return binary.LittleEndian.Uint64(sum[:8])
 }
+
+func relDiff(x, y float64) float64 {
+	diff := math.Abs(x - y)
+	den := math.Max(math.Abs(x), math.Abs(y))
+	if den == 0 {
+		return 0
+	}
+	return diff / den
+}
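
relDiff is the symmetric relative difference used in updateBorrowingLocked to pick log verbosity: adjustments moving the current limit by 5% or more are logged at v(2), smaller ones at v(4). A quick standalone check (not part of the vendored code):

package main

import (
	"fmt"
	"math"
)

func relDiff(x, y float64) float64 {
	diff := math.Abs(x - y)
	den := math.Max(math.Abs(x), math.Abs(y))
	if den == 0 {
		return 0
	}
	return diff / den
}

func main() {
	fmt.Println(relDiff(105, 100) >= 0.05) // false: ~4.8%, logged quietly
	fmt.Println(relDiff(106, 100) >= 0.05) // true: ~5.7%, logged at v(2)
}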

vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go | 8 changes (generated, vendored)
@@ -32,8 +32,8 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/utils/clock"
 
-	flowcontrol "k8s.io/api/flowcontrol/v1beta2"
-	flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
+	flowcontrol "k8s.io/api/flowcontrol/v1beta3"
+	flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3"
 )
 
 // ConfigConsumerAsFieldManager is how the config consuminng
@@ -83,7 +83,7 @@ type Interface interface {
 // New creates a new instance to implement API priority and fairness
 func New(
 	informerFactory kubeinformers.SharedInformerFactory,
-	flowcontrolClient flowcontrolclient.FlowcontrolV1beta2Interface,
+	flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface,
 	serverConcurrencyLimit int,
 	requestWaitLimit time.Duration,
 ) Interface {
@@ -129,7 +129,7 @@ type TestableConfig struct {
 	InformerFactory kubeinformers.SharedInformerFactory
 
 	// FlowcontrolClient to use for manipulating config objects
-	FlowcontrolClient flowcontrolclient.FlowcontrolV1beta2Interface
+	FlowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface
 
 	// ServerConcurrencyLimit for the controller to enforce
 	ServerConcurrencyLimit int
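
Every consumer of this package must move with the signature change. A hypothetical caller-side sketch of the new wiring (the limits and factory setup are assumptions, not part of this diff):

package flowcontroldemo

import (
	"time"

	utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

func newAPF(clientset kubernetes.Interface) utilflowcontrol.Interface {
	informerFactory := kubeinformers.NewSharedInformerFactory(clientset, 0)
	// The visible change for callers: the typed client moves from
	// FlowcontrolV1beta2() to FlowcontrolV1beta3().
	return utilflowcontrol.New(
		informerFactory,
		clientset.FlowcontrolV1beta3(),
		600,            // serverConcurrencyLimit (assumed)
		15*time.Second, // requestWaitLimit (assumed)
	)
}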

vendor/k8s.io/apiserver/pkg/util/flowcontrol/conc_alloc.go | 256 changes (new file; generated, vendored)
@@ -0,0 +1,256 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flowcontrol
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"sort"
+)
+
+// allocProblemItem is one of the classes to which computeConcurrencyAllocation should make an allocation
+type allocProblemItem struct {
+	target     float64
+	lowerBound float64
+	upperBound float64
+}
+
+// relativeAllocItem is like allocProblemItem but with target avoiding zero and the bounds divided by the target
+type relativeAllocItem struct {
+	target             float64
+	relativeLowerBound float64
+	relativeUpperBound float64
+}
+
+// relativeAllocProblem collects together all the classes and holds the result of sorting by increasing bounds.
+// For J <= K, ascendingIndices[J] identifies a bound that is <= the one of ascendingIndices[K].
+// When ascendingIndices[J] = 2*N + 0, this identifies the lower bound of items[N].
+// When ascendingIndices[J] = 2*N + 1, this identifies the upper bound of items[N].
+type relativeAllocProblem struct {
+	items            []relativeAllocItem
+	ascendingIndices []int
+}
+
+// initIndices fills in ascendingIndices and sorts them
+func (rap *relativeAllocProblem) initIndices() *relativeAllocProblem {
+	rap.ascendingIndices = make([]int, len(rap.items)*2)
+	for idx := 0; idx < len(rap.ascendingIndices); idx++ {
+		rap.ascendingIndices[idx] = idx
+	}
+	sort.Sort(rap)
+	return rap
+}
+
+func (rap *relativeAllocProblem) getItemIndex(idx int) (int, bool) {
+	packedIndex := rap.ascendingIndices[idx]
+	itemIndex := packedIndex / 2
+	return itemIndex, packedIndex == itemIndex*2
+}
+
+// decode(J) returns the bound associated with ascendingIndices[J], the associated items index,
+// and a bool indicating whether the bound is the item's lower bound.
+func (rap *relativeAllocProblem) decode(idx int) (float64, int, bool) {
+	itemIdx, lower := rap.getItemIndex(idx)
+	if lower {
+		return rap.items[itemIdx].relativeLowerBound, itemIdx, lower
+	}
+	return rap.items[itemIdx].relativeUpperBound, itemIdx, lower
+}
+
+func (rap *relativeAllocProblem) getProportion(idx int) float64 {
+	prop, _, _ := rap.decode(idx)
+	return prop
+}
+
+func (rap *relativeAllocProblem) Len() int { return len(rap.items) * 2 }
+
+func (rap *relativeAllocProblem) Less(i, j int) bool {
+	return rap.getProportion(i) < rap.getProportion(j)
+}
+
+func (rap *relativeAllocProblem) Swap(i, j int) {
+	rap.ascendingIndices[i], rap.ascendingIndices[j] = rap.ascendingIndices[j], rap.ascendingIndices[i]
+}
+
+// minMax records the minimum and maximum value seen while scanning a set of numbers
+type minMax struct {
+	min float64
+	max float64
+}
+
+// note scans one more number
+func (mm *minMax) note(x float64) {
+	mm.min = math.Min(mm.min, x)
+	mm.max = math.Max(mm.max, x)
+}
+
+const MinTarget = 0.001
+const epsilon = 0.0000001
+
+// computeConcurrencyAllocation returns the unique `allocs []float64`, and
+// an associated `fairProp float64`, that jointly have
+// all of the following properties (to the degree that floating point calculations allow)
+// if possible otherwise returns an error saying why it is impossible.
+// `allocs` sums to `requiredSum`.
+// For each J in [0, len(classes)):
+// (1) `classes[J].lowerBound <= allocs[J] <= classes[J].upperBound` and
+// (2) exactly one of the following is true:
+// (2a) `allocs[J] == fairProp * classes[J].target`,
+// (2b) `allocs[J] == classes[J].lowerBound && classes[J].lowerBound > fairProp * classes[J].target`, or
+// (2c) `allocs[J] == classes[J].upperBound && classes[J].upperBound < fairProp * classes[J].target`.
+// Each allocProblemItem is required to have `target >= lowerBound >= 0` and `upperBound >= lowerBound`.
+// A target smaller than MinTarget is treated as if it were MinTarget.
+func computeConcurrencyAllocation(requiredSum int, classes []allocProblemItem) ([]float64, float64, error) {
+	if requiredSum < 0 {
+		return nil, 0, errors.New("negative sums are not supported")
+	}
+	requiredSumF := float64(requiredSum)
+	var lowSum, highSum, targetSum float64
+	ubRange := minMax{min: float64(math.MaxFloat32)}
+	lbRange := minMax{min: float64(math.MaxFloat32)}
+	relativeItems := make([]relativeAllocItem, len(classes))
+	for idx, item := range classes {
+		target := item.target
+		if item.lowerBound < 0 {
+			return nil, 0, fmt.Errorf("lower bound %d is %v but negative lower bounds are not allowed", idx, item.lowerBound)
+		}
+		if target < item.lowerBound {
+			return nil, 0, fmt.Errorf("target %d is %v, which is below its lower bound of %v", idx, target, item.lowerBound)
+		}
+		if item.upperBound < item.lowerBound {
+			return nil, 0, fmt.Errorf("upper bound %d is %v but should not be less than the lower bound %v", idx, item.upperBound, item.lowerBound)
+		}
+		if target < MinTarget {
+			// tweak this to a non-zero value so avoid dividing by zero
+			target = MinTarget
+		}
+		lowSum += item.lowerBound
+		highSum += item.upperBound
+		targetSum += target
+		relativeItem := relativeAllocItem{
+			target:             target,
+			relativeLowerBound: item.lowerBound / target,
+			relativeUpperBound: item.upperBound / target,
+		}
+		ubRange.note(relativeItem.relativeUpperBound)
+		lbRange.note(relativeItem.relativeLowerBound)
+		relativeItems[idx] = relativeItem
+	}
+	if lbRange.max > 1 {
+		return nil, 0, fmt.Errorf("lbRange.max-1=%v, which is impossible because lbRange.max can not be greater than 1", lbRange.max-1)
+	}
+	if lowSum-requiredSumF > epsilon {
+		return nil, 0, fmt.Errorf("lower bounds sum to %v, which is higher than the required sum of %v", lowSum, requiredSum)
+	}
+	if requiredSumF-highSum > epsilon {
+		return nil, 0, fmt.Errorf("upper bounds sum to %v, which is lower than the required sum of %v", highSum, requiredSum)
+	}
+	ans := make([]float64, len(classes))
+	if requiredSum == 0 {
+		return ans, 0, nil
+	}
+	if lowSum-requiredSumF > -epsilon { // no wiggle room, constrained from below
+		for idx, item := range classes {
+			ans[idx] = item.lowerBound
+		}
+		return ans, lbRange.min, nil
+	}
+	if requiredSumF-highSum > -epsilon { // no wiggle room, constrained from above
+		for idx, item := range classes {
+			ans[idx] = item.upperBound
+		}
+		return ans, ubRange.max, nil
+	}
+	// Now we know the solution is a unique fairProp in [lbRange.min, ubRange.max].
+	// See if the solution does not run into any bounds.
+	fairProp := requiredSumF / targetSum
+	if lbRange.max <= fairProp && fairProp <= ubRange.min { // no bounds matter
+		for idx := range classes {
+			ans[idx] = relativeItems[idx].target * fairProp
+		}
+		return ans, fairProp, nil
+	}
+	// Sadly, some bounds matter.
+	// We find the solution by sorting the bounds and considering progressively
+	// higher values of fairProp, starting from lbRange.min.
+	rap := (&relativeAllocProblem{items: relativeItems}).initIndices()
+	sumSoFar := lowSum
+	fairProp = lbRange.min
+	var sensitiveTargetSum, deltaSensitiveTargetSum float64
+	var numSensitiveClasses, deltaSensitiveClasses int
+	var nextIdx int
+	// `nextIdx` is the next `rap` index to consider.
+	// `sumSoFar` is what the allocs would sum to if the current
+	// value of `fairProp` solves the problem.
+	// If the current value of fairProp were the answer then
+	// `sumSoFar == requiredSum`.
+	// Otherwise the next increment in fairProp involves changing the allocations
+	// of `numSensitiveClasses` classes whose targets sum to `sensitiveTargetSum`;
+	// for the other classes, an upper or lower bound has applied and will continue to apply.
+	// The last increment of nextIdx calls for adding `deltaSensitiveClasses`
+	// to `numSensitiveClasses` and adding `deltaSensitiveTargetSum` to `sensitiveTargetSum`.
+	for sumSoFar < requiredSumF {
+		// There might be more than one bound that is equal to the current value
+		// of fairProp; find all of them because they will all be relevant to
+		// the next change in fairProp.
+		// Set nextBound to the next bound that is NOT equal to fairProp,
+		// and advance nextIdx to the index of that bound.
+		var nextBound float64
+		for {
+			sensitiveTargetSum += deltaSensitiveTargetSum
+			numSensitiveClasses += deltaSensitiveClasses
+			if nextIdx >= rap.Len() {
+				return nil, 0, fmt.Errorf("impossible: ran out of bounds to consider in bound-constrained problem")
+			}
+			var itemIdx int
+			var lower bool
+			nextBound, itemIdx, lower = rap.decode(nextIdx)
+			if lower {
+				deltaSensitiveClasses = 1
+				deltaSensitiveTargetSum = rap.items[itemIdx].target
+			} else {
+				deltaSensitiveClasses = -1
+				deltaSensitiveTargetSum = -rap.items[itemIdx].target
+			}
+			nextIdx++
+			if nextBound > fairProp {
+				break
+			}
+		}
+		// fairProp can increase to nextBound without passing any intermediate bounds.
+		if numSensitiveClasses == 0 {
+			// No classes are affected by the next range of fairProp; skip right past it
+			fairProp = nextBound
+			continue
+		}
+		// See whether fairProp can increase to the solution before passing the next bound.
+		deltaFairProp := (requiredSumF - sumSoFar) / sensitiveTargetSum
+		nextProp := fairProp + deltaFairProp
+		if nextProp <= nextBound {
+			fairProp = nextProp
+			break
+		}
+		// No, fairProp has to increase above nextBound
+		sumSoFar += (nextBound - fairProp) * sensitiveTargetSum
+		fairProp = nextBound
+	}
+	for idx, item := range classes {
+		ans[idx] = math.Max(item.lowerBound, math.Min(item.upperBound, fairProp*relativeItems[idx].target))
+	}
+	return ans, fairProp, nil
+}
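
To make the contract above concrete: with targets 10, 20, and 30, a required sum of 30, and a lower bound of 8 on the first class, the solution is fairProp = 0.44 with allocations 8, 8.8, 13.2 (the bound forces class 0 above its fair share, squeezing the others). A standalone sketch that checks this by bisection rather than the sorted-bounds sweep used in the file above:

package main

import (
	"fmt"
	"math"
)

type class struct{ target, lower, upper float64 }

// clampedSum is what the allocations add up to at a given fairProp:
// each class gets fairProp*target, clamped into [lower, upper].
func clampedSum(fairProp float64, cs []class) float64 {
	sum := 0.0
	for _, c := range cs {
		sum += math.Max(c.lower, math.Min(c.upper, fairProp*c.target))
	}
	return sum
}

func main() {
	classes := []class{{target: 10, lower: 8, upper: 100}, {target: 20, upper: 100}, {target: 30, upper: 100}}
	requiredSum := 30.0
	// clampedSum is nondecreasing in fairProp, so bisection finds the
	// unique fairProp whose allocations sum to requiredSum.
	lo, hi := 0.0, 10.0
	for i := 0; i < 60; i++ {
		if mid := (lo + hi) / 2; clampedSum(mid, classes) < requiredSum {
			lo = mid
		} else {
			hi = mid
		}
	}
	fairProp := (lo + hi) / 2
	for i, c := range classes {
		fmt.Printf("alloc[%d] = %.2f\n", i, math.Max(c.lower, math.Min(c.upper, fairProp*c.target)))
	}
	fmt.Printf("fairProp = %.2f\n", fairProp) // 0.44
}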

vendor/k8s.io/apiserver/pkg/util/flowcontrol/debug/dump.go | 9 changes (generated, vendored)
@@ -25,10 +25,11 @@ import (
 
 // QueueSetDump is an instant dump of queue-set.
 type QueueSetDump struct {
-	Queues     []QueueDump
-	Waiting    int
-	Executing  int
-	SeatsInUse int
+	Queues       []QueueDump
+	Waiting      int
+	Executing    int
+	SeatsInUse   int
+	SeatsWaiting int
 }
 
 // QueueDump is an instant dump of one queue in a queue-set.

vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/integrator.go | 35 changes (generated, vendored)
@@ -21,6 +21,8 @@ import (
 	"sync"
 	"time"
 
+	fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics"
+
 	"k8s.io/utils/clock"
 )
 
@@ -29,8 +31,7 @@ import (
 // Integrator is created, and ends at the latest operation on the
 // Integrator.
 type Integrator interface {
-	Set(float64)
-	Add(float64)
+	fcmetrics.Gauge
 
 	GetResults() IntegratorResults
 
@@ -53,6 +54,7 @@ func (x *IntegratorResults) Equal(y *IntegratorResults) bool {
 }
 
 type integrator struct {
+	name  string
 	clock clock.PassiveClock
 	sync.Mutex
 	lastTime time.Time
@@ -61,9 +63,10 @@ type integrator struct {
 	min, max float64
 }
 
-// NewIntegrator makes one that uses the given clock
-func NewIntegrator(clock clock.PassiveClock) Integrator {
+// NewNamedIntegrator makes one that uses the given clock and name
+func NewNamedIntegrator(clock clock.PassiveClock, name string) Integrator {
 	return &integrator{
+		name:     name,
 		clock:    clock,
 		lastTime: clock.Now(),
 	}
@@ -75,6 +78,24 @@ func (igr *integrator) Set(x float64) {
 	igr.Unlock()
 }
 
+func (igr *integrator) Add(deltaX float64) {
+	igr.Lock()
+	igr.setLocked(igr.x + deltaX)
+	igr.Unlock()
+}
+
+func (igr *integrator) Inc() {
+	igr.Add(1)
+}
+
+func (igr *integrator) Dec() {
+	igr.Add(-1)
+}
+
+func (igr *integrator) SetToCurrentTime() {
+	igr.Set(float64(time.Now().UnixNano()))
+}
+
 func (igr *integrator) setLocked(x float64) {
 	igr.updateLocked()
 	igr.x = x
@@ -86,12 +107,6 @@ func (igr *integrator) setLocked(x float64) {
 	}
 }
 
-func (igr *integrator) Add(deltaX float64) {
-	igr.Lock()
-	igr.setLocked(igr.x + deltaX)
-	igr.Unlock()
-}
-
 func (igr *integrator) updateLocked() {
 	now := igr.clock.Now()
 	dt := now.Sub(igr.lastTime).Seconds()
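
The integrator keeps running sums weighted by elapsed time, so GetResults can report a time-weighted average and deviation of seat demand between the controller's resets. A standalone sketch of that bookkeeping (assumed sample data, not the vendored type):

package main

import (
	"fmt"
	"math"
)

// Each gauge value x is weighted by how long it was the current value.
func main() {
	type step struct {
		x  float64 // gauge value
		dt float64 // seconds the gauge held that value
	}
	steps := []step{{2, 1}, {6, 3}, {2, 6}}
	var t, sumX, sumXSquared float64
	for _, s := range steps {
		t += s.dt
		sumX += s.x * s.dt
		sumXSquared += s.x * s.x * s.dt
	}
	avg := sumX / t
	stdDev := math.Sqrt(sumXSquared/t - avg*avg)
	fmt.Printf("avg=%.1f stdDev=%.2f over %.0fs\n", avg, stdDev, t) // avg=3.2 stdDev=1.83 over 10s
}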

vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go | 3 changes (generated, vendored)
@@ -35,7 +35,8 @@ type QueueSetFactory interface {
 	// The RatioedGaugePair observes number of requests,
 	// execution covering just the regular phase.
 	// The RatioedGauge observes number of seats occupied through all phases of execution.
-	BeginConstruction(QueuingConfig, metrics.RatioedGaugePair, metrics.RatioedGauge) (QueueSetCompleter, error)
+	// The Gauge observes the seat demand (executing + queued seats).
+	BeginConstruction(QueuingConfig, metrics.RatioedGaugePair, metrics.RatioedGauge, metrics.Gauge) (QueueSetCompleter, error)
 }
 
 // QueueSetCompleter finishes the two-step process of creating or
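
The new metrics.Gauge argument is fed by apf_controller with metrics.NewUnionGauge(integrator, ratioedGauge), so one write from the queue-set updates both sinks. A hypothetical fan-out sketch of that idea against a minimal local Gauge interface (not the vendored implementation):

package main

import "fmt"

// Gauge is a minimal stand-in for the vendored metrics.Gauge interface.
type Gauge interface {
	Set(float64)
	Add(float64)
}

// unionGauge fans every write out to all member gauges.
type unionGauge []Gauge

func (ug unionGauge) Set(x float64) {
	for _, g := range ug {
		g.Set(x)
	}
}

func (ug unionGauge) Add(dx float64) {
	for _, g := range ug {
		g.Add(dx)
	}
}

type printGauge string

func (p printGauge) Set(x float64)  { fmt.Println(p, "set", x) }
func (p printGauge) Add(dx float64) { fmt.Println(p, "add", dx) }

func main() {
	g := unionGauge{printGauge("integrator"), printGauge("ratioed")}
	g.Set(7) // one queue-set write lands in both sinks
}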

vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go | 105 changes (generated, vendored)
@@ -60,23 +60,32 @@ type promiseFactoryFactory func(*queueSet) promiseFactory
 // `*queueSetCompleter` implements QueueSetCompleter.  Exactly one of
 // the fields `factory` and `theSet` is non-nil.
 type queueSetCompleter struct {
-	factory        *queueSetFactory
-	reqsGaugePair  metrics.RatioedGaugePair
-	execSeatsGauge metrics.RatioedGauge
-	theSet         *queueSet
-	qCfg           fq.QueuingConfig
-	dealer         *shufflesharding.Dealer
+	factory              *queueSetFactory
+	reqsGaugePair        metrics.RatioedGaugePair
+	execSeatsGauge       metrics.RatioedGauge
+	seatDemandIntegrator metrics.Gauge
+	theSet               *queueSet
+	qCfg                 fq.QueuingConfig
+	dealer               *shufflesharding.Dealer
 }
 
 // queueSet implements the Fair Queuing for Server Requests technique
 // described in this package's doc, and a pointer to one implements
 // the QueueSet interface.  The fields listed before the lock
 // should not be changed; the fields listed after the
-// lock must be accessed only while holding the lock.  The methods of
-// this type follow the naming convention that the suffix "Locked"
-// means the caller must hold the lock; for a method whose name does
-// not end in "Locked" either acquires the lock or does not care about
-// locking.
+// lock must be accessed only while holding the lock.
+//
+// The methods of this type follow the naming convention that the
+// suffix "Locked" means the caller must hold the lock; for a method
+// whose name does not end in "Locked" either acquires the lock or
+// does not care about locking.
+//
+// The methods of this type also follow the convention that the suffix
+// "ToBoundLocked" means that the caller may have to follow up with a
+// call to `boundNextDispatchLocked`.  This is so for a method that
+// changes what request is oldest in a queue, because that change means
+// that the anti-windup hack in boundNextDispatchLocked needs to be
+// applied wrt the revised oldest request in the queue.
 type queueSet struct {
 	clock                    eventclock.Interface
 	estimatedServiceDuration time.Duration
@@ -85,6 +94,8 @@ type queueSet struct {
 
 	execSeatsGauge metrics.RatioedGauge // for all phases of execution
 
+	seatDemandIntegrator metrics.Gauge
+
 	promiseFactory promiseFactory
 
 	lock sync.Mutex
@@ -131,6 +142,10 @@ type queueSet struct {
 	// request(s) that are currently executing in this queueset.
 	totSeatsInUse int
 
+	// totSeatsWaiting is the sum, over all the waiting requests, of their
+	// max width.
+	totSeatsWaiting int
+
 	// enqueues is the number of requests that have ever been enqueued
 	enqueues int
 }
@@ -148,17 +163,18 @@ func newTestableQueueSetFactory(c eventclock.Interface, promiseFactoryFactory pr
 	}
 }
 
-func (qsf *queueSetFactory) BeginConstruction(qCfg fq.QueuingConfig, reqsGaugePair metrics.RatioedGaugePair, execSeatsGauge metrics.RatioedGauge) (fq.QueueSetCompleter, error) {
+func (qsf *queueSetFactory) BeginConstruction(qCfg fq.QueuingConfig, reqsGaugePair metrics.RatioedGaugePair, execSeatsGauge metrics.RatioedGauge, seatDemandIntegrator metrics.Gauge) (fq.QueueSetCompleter, error) {
 	dealer, err := checkConfig(qCfg)
 	if err != nil {
 		return nil, err
 	}
 	return &queueSetCompleter{
-		factory:        qsf,
-		reqsGaugePair:  reqsGaugePair,
-		execSeatsGauge: execSeatsGauge,
-		qCfg:           qCfg,
-		dealer:         dealer}, nil
+		factory:              qsf,
+		reqsGaugePair:        reqsGaugePair,
+		execSeatsGauge:       execSeatsGauge,
+		seatDemandIntegrator: seatDemandIntegrator,
+		qCfg:                 qCfg,
+		dealer:               dealer}, nil
 }
 
 // checkConfig returns a non-nil Dealer if the config is valid and
@@ -183,6 +199,7 @@ func (qsc *queueSetCompleter) Complete(dCfg fq.DispatchingConfig) fq.QueueSet {
 		estimatedServiceDuration: 3 * time.Millisecond,
 		reqsGaugePair:            qsc.reqsGaugePair,
 		execSeatsGauge:           qsc.execSeatsGauge,
+		seatDemandIntegrator:     qsc.seatDemandIntegrator,
 		qCfg:                     qsc.qCfg,
 		currentR:                 0,
 		lastRealTime:             qsc.factory.clock.Now(),
@@ -396,12 +413,16 @@ func (req *request) wait() (bool, bool) {
 		// TODO(aaron-prindle) add metrics for this case
 		klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2)
 		// remove the request from the queue as it has timed out
+		queue := req.queue
 		if req.removeFromQueueLocked() != nil {
+			defer qs.boundNextDispatchLocked(queue)
 			qs.totRequestsWaiting--
+			qs.totSeatsWaiting -= req.MaxSeats()
 			metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "cancelled")
 			metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1)
 			req.NoteQueued(false)
 			qs.reqsGaugePair.RequestsWaiting.Add(-1)
+			qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
 		}
 		return false, qs.isIdleLocked()
 	}
@@ -521,7 +542,7 @@ func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Conte
 	queueIdx := qs.shuffleShardLocked(hashValue, descr1, descr2)
 	queue := qs.queues[queueIdx]
 	// The next step is the logic to reject requests that have been waiting too long
-	qs.removeTimedOutRequestsFromQueueLocked(queue, fsName)
+	qs.removeTimedOutRequestsFromQueueToBoundLocked(queue, fsName)
 	// NOTE: currently timeout is only checked for each new request.  This means that there can be
 	// requests that are in the queue longer than the timeout if there are no new requests
 	// We prefer the simplicity over the promptness, at least for now.
@@ -543,7 +564,7 @@
 		queueNoteFn:       queueNoteFn,
 		workEstimate:      qs.completeWorkEstimate(workEstimate),
 	}
-	if ok := qs.rejectOrEnqueueLocked(req); !ok {
+	if ok := qs.rejectOrEnqueueToBoundLocked(req); !ok {
 		return nil
 	}
 	metrics.ObserveQueueLength(ctx, qs.qCfg.Name, fsName, queue.requests.Length())
@@ -583,10 +604,11 @@ func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interfac
 	return bestQueueIdx
 }
 
-// removeTimedOutRequestsFromQueueLocked rejects old requests that have been enqueued
+// removeTimedOutRequestsFromQueueToBoundLocked rejects old requests that have been enqueued
 // past the requestWaitLimit
-func (qs *queueSet) removeTimedOutRequestsFromQueueLocked(queue *queue, fsName string) {
+func (qs *queueSet) removeTimedOutRequestsFromQueueToBoundLocked(queue *queue, fsName string) {
 	timeoutCount := 0
+	disqueueSeats := 0
 	now := qs.clock.Now()
 	reqs := queue.requests
 	// reqs are sorted oldest -> newest
@@ -599,6 +621,7 @@ func (qs *queueSet) removeTimedOutRequestsFromQueueLocked(queue *queue, fsName s
 		if arrivalLimit.After(req.arrivalTime) {
 			if req.decision.Set(decisionReject) && req.removeFromQueueLocked() != nil {
 				timeoutCount++
+				disqueueSeats += req.MaxSeats()
 				req.NoteQueued(false)
 				metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1)
 			}
@@ -612,15 +635,17 @@ func (qs *queueSet) removeTimedOutRequestsFromQueueLocked(queue *queue, fsName s
 	// remove timed out requests from queue
 	if timeoutCount > 0 {
 		qs.totRequestsWaiting -= timeoutCount
+		qs.totSeatsWaiting -= disqueueSeats
 		qs.reqsGaugePair.RequestsWaiting.Add(float64(-timeoutCount))
+		qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
 	}
 }
 
-// rejectOrEnqueueLocked rejects or enqueues the newly arrived
+// rejectOrEnqueueToBoundLocked rejects or enqueues the newly arrived
 // request, which has been assigned to a queue.  If up against the
 // queue length limit and the concurrency limit then returns false.
 // Otherwise enqueues and returns true.
-func (qs *queueSet) rejectOrEnqueueLocked(request *request) bool {
+func (qs *queueSet) rejectOrEnqueueToBoundLocked(request *request) bool {
 	queue := request.queue
 	curQueueLength := queue.requests.Length()
 	// rejects the newly arrived request if resource criteria not met
@@ -629,12 +654,12 @@ func (qs *queueSet) rejectOrEnqueueLocked(request *request) bool {
 		return false
 	}
 
-	qs.enqueueLocked(request)
+	qs.enqueueToBoundLocked(request)
 	return true
 }
 
 // enqueues a request into its queue.
-func (qs *queueSet) enqueueLocked(request *request) {
+func (qs *queueSet) enqueueToBoundLocked(request *request) {
 	queue := request.queue
 	now := qs.clock.Now()
 	if queue.requests.Length() == 0 && queue.requestsExecuting == 0 {
@@ -647,9 +672,11 @@ func (qs *queueSet) enqueueLocked(request *request) {
 	}
 	request.removeFromQueueLocked = queue.requests.Enqueue(request)
 	qs.totRequestsWaiting++
+	qs.totSeatsWaiting += request.MaxSeats()
 	metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, 1)
 	request.NoteQueued(true)
 	qs.reqsGaugePair.RequestsWaiting.Add(1)
+	qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
 }
 
 // dispatchAsMuchAsPossibleLocked does as many dispatches as possible now.
@@ -680,6 +707,7 @@ func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, workEstimate *f
 	metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, fsName, req.MaxSeats())
 	qs.reqsGaugePair.RequestsExecuting.Add(1)
 	qs.execSeatsGauge.Add(float64(req.MaxSeats()))
+	qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
 	klogV := klog.V(5)
 	if klogV.Enabled() {
 		klogV.Infof("QS(%s) at t=%s R=%v: immediate dispatch of request %q %#+v %#+v, qs will have %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, fsName, descr1, descr2, qs.totRequestsExecuting)
@@ -693,7 +721,7 @@
 // be false when either all queues are empty or the request at the head
 // of the next queue cannot be dispatched.
 func (qs *queueSet) dispatchLocked() bool {
-	queue, request := qs.findDispatchQueueLocked()
+	queue, request := qs.findDispatchQueueToBoundLocked()
 	if queue == nil {
 		return false
 	}
@@ -701,11 +729,13 @@
 		return false
 	}
 	qs.totRequestsWaiting--
+	qs.totSeatsWaiting -= request.MaxSeats()
 	metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -1)
 	request.NoteQueued(false)
 	qs.reqsGaugePair.RequestsWaiting.Add(-1)
 	defer qs.boundNextDispatchLocked(queue)
 	if !request.decision.Set(decisionExecute) {
+		qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
 		return true
 	}
 	request.startTime = qs.clock.Now()
@@ -722,6 +752,7 @@
 	metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, request.fsName, request.MaxSeats())
 	qs.reqsGaugePair.RequestsExecuting.Add(1)
 	qs.execSeatsGauge.Add(float64(request.MaxSeats()))
+	qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
 	klogV := klog.V(6)
 	if klogV.Enabled() {
 		klogV.Infof("QS(%s) at t=%s R=%v: dispatching request %#+v %#+v work %v from queue %d with start R %v, queue will have %d waiting & %d requests occupying %d seats, set will have %d seats occupied",
@@ -729,6 +760,11 @@
 			request.workEstimate, queue.index, queue.nextDispatchR, queue.requests.Length(), queue.requestsExecuting, queue.seatsInUse, qs.totSeatsInUse)
 	}
 	// When a request is dequeued for service -> qs.virtualStart += G * width
+	if request.totalWork() > rDecrement/100 { // A single increment should never be so big
+		klog.Errorf("QS(%s) at t=%s R=%v: dispatching request %#+v %#+v with implausibly high work %v from queue %d with start R %v",
+			qs.qCfg.Name, request.startTime.Format(nsTimeFmt), qs.currentR, request.descr1, request.descr2,
+			request.workEstimate, queue.index, queue.nextDispatchR)
+	}
 	queue.nextDispatchR += request.totalWork()
 	return true
 }
@@ -756,11 +792,12 @@ func (qs *queueSet) canAccommodateSeatsLocked(seats int) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// findDispatchQueueLocked examines the queues in round robin order and
|
||||
// findDispatchQueueToBoundLocked examines the queues in round robin order and
|
||||
// returns the first one of those for which the virtual finish time of
|
||||
// the oldest waiting request is minimal, and also returns that request.
|
||||
// Returns nils if the head of the selected queue can not be dispatched now.
|
||||
func (qs *queueSet) findDispatchQueueLocked() (*queue, *request) {
|
||||
// Returns nils if the head of the selected queue can not be dispatched now,
|
||||
// in which case the caller does not need to follow up with`qs.boundNextDispatchLocked`.
|
||||
func (qs *queueSet) findDispatchQueueToBoundLocked() (*queue, *request) {
|
||||
minVirtualFinish := fqrequest.MaxSeatSeconds
|
||||
sMin := fqrequest.MaxSeatSeconds
|
||||
dsMin := fqrequest.MaxSeatSeconds
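For orientation, the selection rule these accumulators feed can be modeled with a self-contained toy (hypothetical types and field names, not the vendored code, which also tracks round-robin position and the sMin/dsMin bounds):

package main

import "fmt"

type toyQueue struct {
	name          string
	nextDispatchR float64 // virtual start of the next request, in seat-seconds
	headWork      float64 // totalWork() of the oldest waiting request, or -1 if empty
}

// pickQueue returns the queue whose oldest waiting request has the
// minimal virtual finish time (virtual start + that request's work).
func pickQueue(queues []*toyQueue) *toyQueue {
	var best *toyQueue
	for _, q := range queues {
		if q.headWork < 0 {
			continue // empty queue, nothing to dispatch
		}
		finish := q.nextDispatchR + q.headWork
		if best == nil || finish < best.nextDispatchR+best.headWork {
			best = q
		}
	}
	return best
}

func main() {
	a := &toyQueue{"a", 10, 1}
	b := &toyQueue{"b", 9, 3}
	fmt.Println(pickQueue([]*toyQueue{a, b}).name) // prints "a": finish 11 < 12
}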
@@ -878,6 +915,7 @@ func (qs *queueSet) finishRequestLocked(r *request) {
	qs.totSeatsInUse -= r.MaxSeats()
	metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, r.fsName, -r.MaxSeats())
	qs.execSeatsGauge.Add(-float64(r.MaxSeats()))
	qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
	if r.queue != nil {
		r.queue.seatsInUse -= r.MaxSeats()
	}
@@ -995,10 +1033,11 @@ func (qs *queueSet) Dump(includeRequestDetails bool) debug.QueueSetDump {
	qs.lock.Lock()
	defer qs.lock.Unlock()
	d := debug.QueueSetDump{
		Queues:     make([]debug.QueueDump, len(qs.queues)),
		Waiting:    qs.totRequestsWaiting,
		Executing:  qs.totRequestsExecuting,
		SeatsInUse: qs.totSeatsInUse,
		Queues:       make([]debug.QueueDump, len(qs.queues)),
		Waiting:      qs.totRequestsWaiting,
		Executing:    qs.totRequestsExecuting,
		SeatsInUse:   qs.totSeatsInUse,
		SeatsWaiting: qs.totSeatsWaiting,
	}
	for i, q := range qs.queues {
		d.Queues[i] = q.dumpLocked(includeRequestDetails)

16
vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go
generated
vendored
@@ -21,7 +21,7 @@ import (
	"encoding/json"
	"fmt"

	flowcontrol "k8s.io/api/flowcontrol/v1beta2"
	flowcontrol "k8s.io/api/flowcontrol/v1beta3"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/endpoints/request"
)
@@ -93,7 +93,7 @@ func FmtPriorityLevelConfiguration(pl *flowcontrol.PriorityLevelConfiguration) s
		return "nil"
	}
	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("&flowcontrolv1beta1.PriorityLevelConfiguration{ObjectMeta: %#+v, Spec: ",
	buf.WriteString(fmt.Sprintf("&flowcontrolv1beta3.PriorityLevelConfiguration{ObjectMeta: %#+v, Spec: ",
		pl.ObjectMeta))
	BufferPriorityLevelConfigurationSpec(&buf, &pl.Spec)
	buf.WriteString(fmt.Sprintf(", Status: %#+v}", pl.Status))
@@ -111,9 +111,9 @@ func FmtPriorityLevelConfigurationSpec(plSpec *flowcontrol.PriorityLevelConfigur
// BufferPriorityLevelConfigurationSpec writes a golang source
// expression for the given value to the given buffer
func BufferPriorityLevelConfigurationSpec(buf *bytes.Buffer, plSpec *flowcontrol.PriorityLevelConfigurationSpec) {
	buf.WriteString(fmt.Sprintf("flowcontrolv1beta1.PriorityLevelConfigurationSpec{Type: %#v", plSpec.Type))
	buf.WriteString(fmt.Sprintf("flowcontrolv1beta3.PriorityLevelConfigurationSpec{Type: %#v", plSpec.Type))
	if plSpec.Limited != nil {
		buf.WriteString(fmt.Sprintf(", Limited: &flowcontrol.LimitedPriorityLevelConfiguration{AssuredConcurrencyShares:%d, LimitResponse:flowcontrol.LimitResponse{Type:%#v", plSpec.Limited.AssuredConcurrencyShares, plSpec.Limited.LimitResponse.Type))
		buf.WriteString(fmt.Sprintf(", Limited: &flowcontrol.LimitedPriorityLevelConfiguration{NominalConcurrencyShares:%d, LimitResponse:flowcontrol.LimitResponse{Type:%#v", plSpec.Limited.NominalConcurrencyShares, plSpec.Limited.LimitResponse.Type))
		if plSpec.Limited.LimitResponse.Queuing != nil {
			buf.WriteString(fmt.Sprintf(", Queuing:&%#+v", *plSpec.Limited.LimitResponse.Queuing))
		}
@@ -128,7 +128,7 @@ func FmtFlowSchema(fs *flowcontrol.FlowSchema) string {
		return "nil"
	}
	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("&flowcontrolv1beta1.FlowSchema{ObjectMeta: %#+v, Spec: ",
	buf.WriteString(fmt.Sprintf("&flowcontrolv1beta3.FlowSchema{ObjectMeta: %#+v, Spec: ",
		fs.ObjectMeta))
	BufferFlowSchemaSpec(&buf, &fs.Spec)
	buf.WriteString(fmt.Sprintf(", Status: %#+v}", fs.Status))
@@ -146,7 +146,7 @@ func FmtFlowSchemaSpec(fsSpec *flowcontrol.FlowSchemaSpec) string {
// BufferFlowSchemaSpec writes a golang source expression for the
// given value to the given buffer
func BufferFlowSchemaSpec(buf *bytes.Buffer, fsSpec *flowcontrol.FlowSchemaSpec) {
	buf.WriteString(fmt.Sprintf("flowcontrolv1beta1.FlowSchemaSpec{PriorityLevelConfiguration: %#+v, MatchingPrecedence: %d, DistinguisherMethod: ",
	buf.WriteString(fmt.Sprintf("flowcontrolv1beta3.FlowSchemaSpec{PriorityLevelConfiguration: %#+v, MatchingPrecedence: %d, DistinguisherMethod: ",
		fsSpec.PriorityLevelConfiguration,
		fsSpec.MatchingPrecedence))
	if fsSpec.DistinguisherMethod == nil {
@@ -166,7 +166,7 @@ func BufferFlowSchemaSpec(buf *bytes.Buffer, fsSpec *flowcontrol.FlowSchemaSpec)

// FmtPolicyRulesWithSubjects produces a golang source expression of the value.
func FmtPolicyRulesWithSubjects(rule flowcontrol.PolicyRulesWithSubjects) string {
	return "flowcontrolv1beta1.PolicyRulesWithSubjects" + FmtPolicyRulesWithSubjectsSlim(rule)
	return "flowcontrolv1beta3.PolicyRulesWithSubjects" + FmtPolicyRulesWithSubjectsSlim(rule)
}

// FmtPolicyRulesWithSubjectsSlim produces a golang source expression
@@ -182,7 +182,7 @@ func FmtPolicyRulesWithSubjectsSlim(rule flowcontrol.PolicyRulesWithSubjects) st
// expression for the given value to the given buffer but excludes the
// leading type name
func BufferFmtPolicyRulesWithSubjectsSlim(buf *bytes.Buffer, rule flowcontrol.PolicyRulesWithSubjects) {
	buf.WriteString("{Subjects: []flowcontrolv1beta1.Subject{")
	buf.WriteString("{Subjects: []flowcontrolv1beta3.Subject{")
	for jdx, subj := range rule.Subjects {
		if jdx > 0 {
			buf.WriteString(", ")

159
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go
generated
vendored
@@ -65,11 +65,12 @@ type resettable interface {
	Reset()
}

// Reset all metrics to zero
// Reset all resettable metrics to zero
func Reset() {
	for _, metric := range metrics {
		rm := metric.(resettable)
		rm.Reset()
		if rm, ok := metric.(resettable); ok {
			rm.Reset()
		}
	}
}
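The rewritten loop uses the comma-ok form of the type assertion: rm := metric.(resettable) panics on the first entry that does not satisfy the interface, whereas if rm, ok := metric.(resettable); ok simply skips such entries, so registerables without a Reset method (plausibly the new histogram-backed entries appended below) no longer crash Reset().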

@@ -316,6 +317,120 @@ var (
		},
		[]string{priorityLevel, flowSchema},
	)
	apiserverNominalConcurrencyLimits = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "nominal_limit_seats",
			Help:           "Nominal number of execution seats configured for each priority level",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	apiserverMinimumConcurrencyLimits = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "lower_limit_seats",
			Help:           "Configured lower bound on number of execution seats available to each priority level",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	apiserverMaximumConcurrencyLimits = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "upper_limit_seats",
			Help:           "Configured upper bound on number of execution seats available to each priority level",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	ApiserverSeatDemands = NewTimingRatioHistogramVec(
		&compbasemetrics.TimingHistogramOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "demand_seats",
			Help:      "Observations, at the end of every nanosecond, of (the number of seats each priority level could use) / (nominal number of seats for that level)",
			// Rationale for the bucket boundaries:
			// For 0--1, evenly spaced and not too many;
			// For 1--2, roughly powers of sqrt(sqrt(2));
			// For 2--6, roughly powers of sqrt(2);
			// We need coverage over 1, but do not want too many buckets.
			Buckets:        []float64{0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.4, 1.7, 2, 2.8, 4, 6},
			StabilityLevel: compbasemetrics.ALPHA,
		},
		priorityLevel,
	)
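A quick check of that bucket rationale: sqrt(sqrt(2)) ≈ 1.189, so its powers starting from 1 are ≈ 1.19, 1.41, 1.68, 2.0, rounded here to 1.2, 1.4, 1.7, 2; continuing upward from 2 by factors of sqrt(2) ≈ 1.414 gives ≈ 2.83, 4.0, 5.66, rounded to 2.8, 4, 6.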
	apiserverSeatDemandHighWatermarks = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "demand_seats_high_watermark",
			Help:           "High watermark, over last adjustment period, of demand_seats",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	apiserverSeatDemandAverages = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "demand_seats_average",
			Help:           "Time-weighted average, over last adjustment period, of demand_seats",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	apiserverSeatDemandStandardDeviations = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "demand_seats_stdev",
			Help:           "Time-weighted standard deviation, over last adjustment period, of demand_seats",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	apiserverSeatDemandSmootheds = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "demand_seats_smoothed",
			Help:           "Smoothed seat demands",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	apiserverSeatDemandTargets = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "target_seats",
			Help:           "Seat allocation targets",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)
	apiserverFairFracs = compbasemetrics.NewGauge(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "seat_fair_frac",
			Help:           "Fair fraction of server's concurrency to allocate to each priority level that can use it",
			StabilityLevel: compbasemetrics.ALPHA,
		})
	apiserverCurrentConcurrencyLimits = compbasemetrics.NewGaugeVec(
		&compbasemetrics.GaugeOpts{
			Namespace:      namespace,
			Subsystem:      subsystem,
			Name:           "current_limit_seats",
			Help:           "current derived number of execution seats available to each priority level",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{priorityLevel},
	)

	metrics = Registerables{
		apiserverRejectedRequestsTotal,
@@ -336,10 +451,21 @@ var (
		apiserverEpochAdvances,
		apiserverWorkEstimatedSeats,
		apiserverDispatchWithNoAccommodation,
		apiserverNominalConcurrencyLimits,
		apiserverMinimumConcurrencyLimits,
		apiserverMaximumConcurrencyLimits,
		apiserverSeatDemandHighWatermarks,
		apiserverSeatDemandAverages,
		apiserverSeatDemandStandardDeviations,
		apiserverSeatDemandSmootheds,
		apiserverSeatDemandTargets,
		apiserverFairFracs,
		apiserverCurrentConcurrencyLimits,
	}.
		Append(PriorityLevelExecutionSeatsGaugeVec.metrics()...).
		Append(PriorityLevelConcurrencyGaugeVec.metrics()...).
		Append(readWriteConcurrencyGaugeVec.metrics()...)
		Append(readWriteConcurrencyGaugeVec.metrics()...).
		Append(ApiserverSeatDemands.metrics()...)
)

type indexOnce struct {
@@ -403,11 +529,6 @@ func AddRequestConcurrencyInUse(priorityLevel, flowSchema string, delta int) {
	apiserverRequestConcurrencyInUse.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta))
}

// UpdateSharedConcurrencyLimit updates the value for the concurrency limit in flow control
func UpdateSharedConcurrencyLimit(priorityLevel string, limit int) {
	apiserverRequestConcurrencyLimit.WithLabelValues(priorityLevel).Set(float64(limit))
}

// AddReject increments the # of rejected requests for flow control
func AddReject(ctx context.Context, priorityLevel, flowSchema, reason string) {
	apiserverRejectedRequestsTotal.WithContext(ctx).WithLabelValues(priorityLevel, flowSchema, reason).Add(1)
@@ -457,3 +578,23 @@ func ObserveWorkEstimatedSeats(priorityLevel, flowSchema string, seats int) {
func AddDispatchWithNoAccommodation(priorityLevel, flowSchema string) {
	apiserverDispatchWithNoAccommodation.WithLabelValues(priorityLevel, flowSchema).Inc()
}

func SetPriorityLevelConfiguration(priorityLevel string, nominalCL, minCL, maxCL int) {
	apiserverRequestConcurrencyLimit.WithLabelValues(priorityLevel).Set(float64(nominalCL))
	apiserverNominalConcurrencyLimits.WithLabelValues(priorityLevel).Set(float64(nominalCL))
	apiserverMinimumConcurrencyLimits.WithLabelValues(priorityLevel).Set(float64(minCL))
	apiserverMaximumConcurrencyLimits.WithLabelValues(priorityLevel).Set(float64(maxCL))
}

func NotePriorityLevelConcurrencyAdjustment(priorityLevel string, seatDemandHWM, seatDemandAvg, seatDemandStdev, seatDemandSmoothed, seatDemandTarget float64, currentCL int) {
	apiserverSeatDemandHighWatermarks.WithLabelValues(priorityLevel).Set(seatDemandHWM)
	apiserverSeatDemandAverages.WithLabelValues(priorityLevel).Set(seatDemandAvg)
	apiserverSeatDemandStandardDeviations.WithLabelValues(priorityLevel).Set(seatDemandStdev)
	apiserverSeatDemandSmootheds.WithLabelValues(priorityLevel).Set(seatDemandSmoothed)
	apiserverSeatDemandTargets.WithLabelValues(priorityLevel).Set(seatDemandTarget)
	apiserverCurrentConcurrencyLimits.WithLabelValues(priorityLevel).Set(float64(currentCL))
}

func SetFairFrac(fairFrac float64) {
	apiserverFairFracs.Set(fairFrac)
}
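For context, a caller in the flow-control controller would publish a level's configured bounds once per config change and its per-period observations on every borrowing adjustment. A hypothetical invocation of these new setters (level name and all numbers invented for illustration):

	// Hypothetical: after recomputing limits for priority level "workload-low"
	// with nominal 245 seats, bounded between 122 and 490.
	metrics.SetPriorityLevelConfiguration("workload-low", 245, 122, 490)

	// Hypothetical: at the end of a borrowing adjustment period, publish the
	// observed seat-demand statistics and the newly derived current limit.
	metrics.NotePriorityLevelConcurrencyAdjustment("workload-low",
		300,   // high watermark of seat demand
		180.5, // time-weighted average
		40.2,  // time-weighted standard deviation
		210.7, // smoothed demand
		230,   // allocation target
		245)   // resulting current concurrency limit
	metrics.SetFairFrac(0.83)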

56
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/union_gauge.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

type unionGauge []Gauge

var _ Gauge = unionGauge(nil)

// NewUnionGauge constructs a Gauge that delegates to all of the given Gauges
func NewUnionGauge(elts ...Gauge) Gauge {
	return unionGauge(elts)
}

func (ug unionGauge) Set(x float64) {
	for _, gauge := range ug {
		gauge.Set(x)
	}
}

func (ug unionGauge) Add(x float64) {
	for _, gauge := range ug {
		gauge.Add(x)
	}
}

func (ug unionGauge) Inc() {
	for _, gauge := range ug {
		gauge.Inc()
	}
}

func (ug unionGauge) Dec() {
	for _, gauge := range ug {
		gauge.Dec()
	}
}

func (ug unionGauge) SetToCurrentTime() {
	for _, gauge := range ug {
		gauge.SetToCurrentTime()
	}
}
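unionGauge is a plain fan-out: a slice of Gauges that replays every call on each element, so one writer can feed several sinks (say, a Prometheus gauge and a timing-ratio observer) without knowing about either. A hypothetical use, where g1 and g2 stand for any two values satisfying this package's Gauge interface:

	demand := metrics.NewUnionGauge(g1, g2)
	demand.Set(11) // both g1 and g2 now read 11
	demand.Add(2)  // both advance to 13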

18
vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/mutating_work_estimator.go
generated
vendored
@@ -57,6 +57,15 @@ func (e *mutatingWorkEstimator) estimate(r *http.Request, flowSchemaName, priori
			AdditionalLatency: e.config.eventAdditionalDuration(),
		}
	}

	if isRequestExemptFromWatchEvents(requestInfo) {
		return WorkEstimate{
			InitialSeats:      e.config.MinimumSeats,
			FinalSeats:        0,
			AdditionalLatency: time.Duration(0),
		}
	}

	watchCount := e.countFn(requestInfo)
	metrics.ObserveWatchCount(r.Context(), priorityLevelName, flowSchemaName, watchCount)

@@ -129,3 +138,12 @@ func (e *mutatingWorkEstimator) estimate(r *http.Request, flowSchemaName, priori
		AdditionalLatency: additionalLatency,
	}
}

func isRequestExemptFromWatchEvents(requestInfo *apirequest.RequestInfo) bool {
	// Creating token for service account does not produce any event,
	// but still serviceaccounts can have multiple watchers.
	if requestInfo.Resource == "serviceaccounts" && requestInfo.Subresource == "token" {
		return true
	}
	return false
}
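Concretely, this exempts TokenRequest calls, i.e. POSTs to the token subresource of a serviceaccount (for example via kubectl create token): such writes produce no watch events, so the estimator charges them only e.config.MinimumSeats instead of a seat count scaled by the number of watchers on the resource.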

2
vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go
generated
vendored
@@ -19,7 +19,7 @@ package flowcontrol
import (
	"strings"

	flowcontrol "k8s.io/api/flowcontrol/v1beta2"
	flowcontrol "k8s.io/api/flowcontrol/v1beta3"
	"k8s.io/apiserver/pkg/authentication/serviceaccount"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/endpoints/request"
2
vendor/k8s.io/apiserver/pkg/util/webhook/client.go
generated
vendored
@@ -141,7 +141,7 @@ func (cm *ClientManager) HookClient(cc ClientConfig) (*rest.RESTClient, error) {

	// Use http/1.1 instead of http/2.
	// This is a workaround for http/2-enabled clients not load-balancing concurrent requests to multiple backends.
	// See http://issue.k8s.io/75791 for details.
	// See https://issue.k8s.io/75791 for details.
	cfg.NextProtos = []string{"http/1.1"}

	cfg.ContentConfig.NegotiatedSerializer = cm.negotiatedSerializer
2
vendor/k8s.io/apiserver/pkg/util/webhook/validation.go
generated
vendored
@@ -36,7 +36,7 @@ func ValidateWebhookURL(fldPath *field.Path, URL string, forceHttps bool) field.
		allErrors = append(allErrors, field.Invalid(fldPath, u.Scheme, "'https' is the only allowed URL scheme"+form))
	}
	if len(u.Host) == 0 {
		allErrors = append(allErrors, field.Invalid(fldPath, u.Host, "host must be provided"+form))
		allErrors = append(allErrors, field.Invalid(fldPath, u.Host, "host must be specified"+form))
	}
	if u.User != nil {
		allErrors = append(allErrors, field.Invalid(fldPath, u.User.String(), "user information is not permitted in the URL"))