Bump sigs.k8s.io/controller-runtime to v0.14.4 (#5507)
* Bump sigs.k8s.io/controller-runtime to v0.14.4
* Update gofmt
294  vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go  (generated, vendored)
@@ -20,7 +20,6 @@ import (
 	"context"
 	"crypto/sha256"
 	"encoding/binary"
-	"encoding/json"
 	"errors"
 	"fmt"
 	"math"
@@ -34,7 +33,6 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	apitypes "k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -52,9 +50,10 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/utils/clock"
 
-	flowcontrol "k8s.io/api/flowcontrol/v1beta2"
-	flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2"
-	flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1beta2"
+	flowcontrol "k8s.io/api/flowcontrol/v1beta3"
+	flowcontrolapplyconfiguration "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
+	flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3"
+	flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1beta3"
 )
 
 const timeFmt = "2006-01-02T15:04:05.999"
@@ -68,6 +67,22 @@ const timeFmt
 // undesired becomes completely unused, all the config objects are
 // read and processed as a whole.
 
+const (
+	// Borrowing among priority levels will be accomplished by periodically
+	// adjusting the current concurrency limits (CurrentCLs);
+	// borrowingAdjustmentPeriod is that period.
+	borrowingAdjustmentPeriod = 10 * time.Second
+
+	// The input to the seat borrowing is smoothed seat demand figures.
+	// This constant controls the decay rate of that smoothing,
+	// as described in the comment on the `seatDemandStats` field of `priorityLevelState`.
+	// The particular number appearing here has the property that the half-life
+	// of that decay is 5 minutes.
+	// This is a very preliminary guess at a good value and is likely to be tweaked
+	// once we get some experience with borrowing.
+	seatDemandSmoothingCoefficient = 0.977
+)
+
 // The funcs in this package follow the naming convention that the suffix
 // "Locked" means the relevant mutex must be locked at the start of each
 // call and will be locked upon return. For a configController, the
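A quick sanity check on the pairing of these two constants (a standalone sketch, not part of this commit): decaying by 0.977 once per 10-second adjustment period halves the smoothed value in about 30 periods, i.e. roughly 5 minutes.

package main

import (
	"fmt"
	"math"
)

func main() {
	const coefficient = 0.977 // seatDemandSmoothingCoefficient
	const periodSec = 10.0    // borrowingAdjustmentPeriod, in seconds
	// Solve coefficient^n = 0.5 for n, the number of periods per half-life.
	periods := math.Log(0.5) / math.Log(coefficient)
	fmt.Printf("half-life ≈ %.1f periods ≈ %.0f s\n", periods, periods*periodSec) // ≈ 29.8 periods ≈ 298 s
}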
@@ -123,7 +138,7 @@ type configController struct
 	fsLister         flowcontrollister.FlowSchemaLister
 	fsInformerSynced cache.InformerSynced
 
-	flowcontrolClient flowcontrolclient.FlowcontrolV1beta2Interface
+	flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface
 
 	// serverConcurrencyLimit is the limit on the server's total
 	// number of non-exempt requests being served at once. This comes
@@ -162,6 +177,12 @@ type configController struct
 	// name to the state for that level. Every name referenced from a
 	// member of `flowSchemas` has an entry here.
 	priorityLevelStates map[string]*priorityLevelState
+
+	// nominalCLSum is the sum of the nominalCL fields in the priorityLevelState records.
+	// This can exceed serverConcurrencyLimit because of the deliberate rounding up
+	// in the computation of the nominalCL values.
+	// This is tracked because it is an input to the allocation adjustment algorithm.
+	nominalCLSum int
 }
 
 type updateAttempt struct {
@@ -197,6 +218,46 @@ type priorityLevelState struct {
 
 	// Observer of number of seats occupied throughout execution
 	execSeatsObs metrics.RatioedGauge
+
+	// Integrator of seat demand, reset every CurrentCL adjustment period
+	seatDemandIntegrator fq.Integrator
+
+	// Gauge of seat demand / nominalCL
+	seatDemandRatioedGauge metrics.RatioedGauge
+
+	// seatDemandStats is derived from periodically examining the seatDemandIntegrator.
+	// The average, standard deviation, and high watermark come directly from the integrator.
+	// envelope = avg + stdDev.
+	// Periodically smoothed gets replaced with `max(envelope, A*smoothed + (1-A)*envelope)`,
+	// where A is seatDemandSmoothingCoefficient.
+	seatDemandStats seatDemandStats
+
+	// nominalCL is the nominal concurrency limit configured in the PriorityLevelConfiguration
+	nominalCL int
+
+	// minCL is the nominal limit less the lendable amount
+	minCL int
+
+	// maxCL is the nominal limit plus the amount that may be borrowed
+	maxCL int
+
+	// currentCL is the dynamically derived concurrency limit to impose for now
+	currentCL int
 }
+
+type seatDemandStats struct {
+	avg           float64
+	stdDev        float64
+	highWatermark float64
+	smoothed      float64
+}
+
+func (stats *seatDemandStats) update(obs fq.IntegratorResults) {
+	stats.avg = obs.Average
+	stats.stdDev = obs.Deviation
+	stats.highWatermark = obs.Max
+	envelope := obs.Average + obs.Deviation
+	stats.smoothed = math.Max(envelope, seatDemandSmoothingCoefficient*stats.smoothed+(1-seatDemandSmoothingCoefficient)*envelope)
+}
 
 // NewTestableController is extra flexible to facilitate testing
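The max in update above makes the smoothed figure track rising demand immediately while decaying only slowly after a drop. A worked illustration with invented numbers (not part of this commit):

// Start with smoothed = 100 seats.
// Demand collapses; the new envelope (avg+stdDev) is 15:
//   max(15, 0.977*100 + 0.023*15) = max(15, 98.045) = 98.045   // slow decay
// Demand spikes; the next envelope is 120:
//   max(120, 0.977*98.045 + 0.023*120) = max(120, 98.55) = 120 // immediate rise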
@@ -221,7 +282,7 @@ func newTestableController(config TestableConfig) *configController {
 	cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue")
 	// ensure the data structure reflects the mandatory config
 	cfgCtlr.lockAndDigestConfigObjects(nil, nil)
-	fci := config.InformerFactory.Flowcontrol().V1beta2()
+	fci := config.InformerFactory.Flowcontrol().V1beta3()
 	pli := fci.PriorityLevelConfigurations()
 	fsi := fci.FlowSchemas()
 	cfgCtlr.plLister = pli.Lister()
@@ -305,11 +366,87 @@ func (cfgCtlr *configController) Run(stopCh <-chan struct{}) error {
 	klog.Info("Running API Priority and Fairness config worker")
 	go wait.Until(cfgCtlr.runWorker, time.Second, stopCh)
 
+	klog.Info("Running API Priority and Fairness periodic rebalancing process")
+	go wait.Until(cfgCtlr.updateBorrowing, borrowingAdjustmentPeriod, stopCh)
+
 	<-stopCh
 	klog.Info("Shutting down API Priority and Fairness config worker")
 	return nil
 }
 
+func (cfgCtlr *configController) updateBorrowing() {
+	cfgCtlr.lock.Lock()
+	defer cfgCtlr.lock.Unlock()
+	cfgCtlr.updateBorrowingLocked(true, cfgCtlr.priorityLevelStates)
+}
+
+func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plStates map[string]*priorityLevelState) {
+	items := make([]allocProblemItem, 0, len(plStates))
+	plNames := make([]string, 0, len(plStates))
+	for plName, plState := range plStates {
+		if plState.pl.Spec.Limited == nil {
+			continue
+		}
+		obs := plState.seatDemandIntegrator.Reset()
+		plState.seatDemandStats.update(obs)
+		// Lower bound on this priority level's adjusted concurrency limit is the lesser of:
+		// - its seat demand high watermark over the last adjustment period, and
+		// - its configured concurrency limit.
+		// BUT: we do not want this to be lower than the lower bound from configuration.
+		// See KEP-1040 for a more detailed explanation.
+		minCurrentCL := math.Max(float64(plState.minCL), math.Min(float64(plState.nominalCL), plState.seatDemandStats.highWatermark))
+		plNames = append(plNames, plName)
+		items = append(items, allocProblemItem{
+			lowerBound: minCurrentCL,
+			upperBound: float64(plState.maxCL),
+			target:     math.Max(minCurrentCL, plState.seatDemandStats.smoothed),
+		})
+	}
+	if len(items) == 0 && cfgCtlr.nominalCLSum > 0 {
+		klog.ErrorS(nil, "Impossible: no non-exempt priority levels", "plStates", cfgCtlr.priorityLevelStates)
+		return
+	}
+	allocs, fairFrac, err := computeConcurrencyAllocation(cfgCtlr.nominalCLSum, items)
+	if err != nil {
+		klog.ErrorS(err, "Unable to derive new concurrency limits", "plNames", plNames, "items", items)
+		allocs = make([]float64, len(items))
+		for idx, plName := range plNames {
+			plState := plStates[plName]
+			if plState.pl.Spec.Limited == nil {
+				continue
+			}
+			allocs[idx] = float64(plState.currentCL)
+		}
+	}
+	for idx, plName := range plNames {
+		plState := plStates[plName]
+		if plState.pl.Spec.Limited == nil {
+			continue
+		}
+		if setCompleters {
+			qsCompleter, err := queueSetCompleterForPL(cfgCtlr.queueSetFactory, plState.queues,
+				plState.pl, cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs,
+				metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge))
+			if err != nil {
+				klog.ErrorS(err, "Inconceivable! Configuration error in existing priority level", "pl", plState.pl)
+				continue
+			}
+			plState.qsCompleter = qsCompleter
+		}
+		currentCL := int(math.Round(float64(allocs[idx])))
+		relChange := relDiff(float64(currentCL), float64(plState.currentCL))
+		plState.currentCL = currentCL
+		metrics.NotePriorityLevelConcurrencyAdjustment(plState.pl.Name, plState.seatDemandStats.highWatermark, plState.seatDemandStats.avg, plState.seatDemandStats.stdDev, plState.seatDemandStats.smoothed, float64(items[idx].target), currentCL)
+		logLevel := klog.Level(4)
+		if relChange >= 0.05 {
+			logLevel = 2
+		}
+		klog.V(logLevel).InfoS("Update CurrentCL", "plName", plName, "seatDemandHighWatermark", plState.seatDemandStats.highWatermark, "seatDemandAvg", plState.seatDemandStats.avg, "seatDemandStdev", plState.seatDemandStats.stdDev, "seatDemandSmoothed", plState.seatDemandStats.smoothed, "fairFrac", fairFrac, "currentCL", currentCL, "backstop", err != nil)
+		plState.queues = plState.qsCompleter.Complete(fq.DispatchingConfig{ConcurrencyLimit: currentCL})
+	}
+	metrics.SetFairFrac(float64(fairFrac))
+}
+
 // runWorker is the logic of the one and only worker goroutine. We
 // limit the number to one in order to obviate explicit
 // synchronization around access to `cfgCtlr.mostRecentUpdates`.
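To make the allocation inputs concrete: each limited level contributes one allocProblemItem, and computeConcurrencyAllocation divides nominalCLSum among them. With invented numbers (not part of this commit), take a level configured with nominalCL=20, minCL=10, maxCL=30:

// Quiet level over the last period: highWatermark=4, smoothed=6.
//   lowerBound = max(10, min(20, 4))  = 10   // it can lend up to 10 of its 20 seats
//   target     = max(10, 6)           = 10
// Busy level: highWatermark=25, smoothed=25.
//   lowerBound = max(10, min(20, 25)) = 20   // it keeps its full nominal share
//   target     = max(20, 25)          = 25   // and asks to borrow 5 more
//   upperBound = 30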
@@ -420,19 +557,12 @@ func (cfgCtlr *configController) digestConfigObjects(newPLs []*flowcontrol.Prior
 
 		// if we are going to issue an update, be sure we track every name we update so we know if we update it too often.
 		currResult.updatedItems.Insert(fsu.flowSchema.Name)
-		patchBytes, err := makeFlowSchemaConditionPatch(fsu.condition)
-		if err != nil {
-			// should never happen because these conditions are created here and well formed
-			panic(fmt.Sprintf("Failed to json.Marshall(%#+v): %s", fsu.condition, err.Error()))
-		}
 		if klogV := klog.V(4); klogV.Enabled() {
 			klogV.Infof("%s writing Condition %s to FlowSchema %s, which had ResourceVersion=%s, because its previous value was %s, diff: %s",
 				cfgCtlr.name, fsu.condition, fsu.flowSchema.Name, fsu.flowSchema.ResourceVersion, fcfmt.Fmt(fsu.oldValue), cmp.Diff(fsu.oldValue, fsu.condition))
 		}
-		fsIfc := cfgCtlr.flowcontrolClient.FlowSchemas()
-		patchOptions := metav1.PatchOptions{FieldManager: cfgCtlr.asFieldManager}
-		_, err = fsIfc.Patch(context.TODO(), fsu.flowSchema.Name, apitypes.StrategicMergePatchType, patchBytes, patchOptions, "status")
-		if err != nil {
+		if err := apply(cfgCtlr.flowcontrolClient.FlowSchemas(), fsu, cfgCtlr.asFieldManager); err != nil {
 			if apierrors.IsNotFound(err) {
 				// This object has been deleted. A notification is coming
 				// and nothing more needs to be done here.
@@ -447,18 +577,27 @@ func (cfgCtlr *configController) digestConfigObjects(newPLs []*flowcontrol.Prior
 	return suggestedDelay, utilerrors.NewAggregate(errs)
 }
 
-// makeFlowSchemaConditionPatch takes in a condition and returns the patch status as a json.
-func makeFlowSchemaConditionPatch(condition flowcontrol.FlowSchemaCondition) ([]byte, error) {
-	o := struct {
-		Status flowcontrol.FlowSchemaStatus `json:"status"`
-	}{
-		Status: flowcontrol.FlowSchemaStatus{
-			Conditions: []flowcontrol.FlowSchemaCondition{
-				condition,
-			},
-		},
-	}
-	return json.Marshal(o)
-}
+func apply(client flowcontrolclient.FlowSchemaInterface, fsu fsStatusUpdate, asFieldManager string) error {
+	applyOptions := metav1.ApplyOptions{FieldManager: asFieldManager, Force: true}
+
+	// the condition field in fsStatusUpdate holds the new condition we want to update.
+	// TODO: this will break when we have multiple conditions for a flowschema
+	_, err := client.ApplyStatus(context.TODO(), toFlowSchemaApplyConfiguration(fsu), applyOptions)
+	return err
+}
+
+func toFlowSchemaApplyConfiguration(fsUpdate fsStatusUpdate) *flowcontrolapplyconfiguration.FlowSchemaApplyConfiguration {
+	condition := flowcontrolapplyconfiguration.FlowSchemaCondition().
+		WithType(fsUpdate.condition.Type).
+		WithStatus(fsUpdate.condition.Status).
+		WithReason(fsUpdate.condition.Reason).
+		WithLastTransitionTime(fsUpdate.condition.LastTransitionTime).
+		WithMessage(fsUpdate.condition.Message)
+
+	return flowcontrolapplyconfiguration.FlowSchema(fsUpdate.flowSchema.Name).
+		WithStatus(flowcontrolapplyconfiguration.FlowSchemaStatus().
+			WithConditions(condition),
+		)
+}
 
 // shouldDelayUpdate checks to see if a flowschema has been updated too often and returns true if a delay is needed.
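This hunk switches FlowSchema status updates from a strategic-merge patch to server-side apply: the controller declares only the condition it owns under its field manager, and Force: true lets it reclaim that field from any stale manager. The ApplyStatus patch produced by toFlowSchemaApplyConfiguration should be roughly equivalent to this document (an illustrative sketch; the field values are invented):

// {"apiVersion":"flowcontrol.apiserver.k8s.io/v1beta3","kind":"FlowSchema",
//  "metadata":{"name":"example-fs"},
//  "status":{"conditions":[{"type":"Dangling","status":"False","reason":"Found",
//                           "lastTransitionTime":"...","message":"..."}]}}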
@@ -530,9 +669,16 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi
 		state := meal.cfgCtlr.priorityLevelStates[pl.Name]
 		if state == nil {
 			labelValues := []string{pl.Name}
-			state = &priorityLevelState{reqsGaugePair: metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues), execSeatsObs: meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues)}
+			state = &priorityLevelState{
+				reqsGaugePair:          metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues),
+				execSeatsObs:           meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues),
+				seatDemandIntegrator:   fq.NewNamedIntegrator(meal.cfgCtlr.clock, pl.Name),
+				seatDemandRatioedGauge: metrics.ApiserverSeatDemands.NewForLabelValuesSafe(0, 1, []string{pl.Name}),
+			}
 		}
-		qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues, pl, meal.cfgCtlr.requestWaitLimit, state.reqsGaugePair, state.execSeatsObs)
+		qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues,
+			pl, meal.cfgCtlr.requestWaitLimit, state.reqsGaugePair, state.execSeatsObs,
+			metrics.NewUnionGauge(state.seatDemandIntegrator, state.seatDemandRatioedGauge))
 		if err != nil {
 			klog.Warningf("Ignoring PriorityLevelConfiguration object %s because its spec (%s) is broken: %s", pl.Name, fcfmt.Fmt(pl.Spec), err)
 			continue
@@ -545,7 +691,7 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi
 			state.quiescing = false
 		}
 		if state.pl.Spec.Limited != nil {
-			meal.shareSum += float64(state.pl.Spec.Limited.AssuredConcurrencyShares)
+			meal.shareSum += float64(state.pl.Spec.Limited.NominalConcurrencyShares)
 		}
 		meal.haveExemptPL = meal.haveExemptPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameExempt
 		meal.haveCatchAllPL = meal.haveCatchAllPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameCatchAll
@@ -636,7 +782,9 @@ func (meal *cfgMeal) processOldPLsLocked() {
 			}
 		}
 		var err error
-		plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues, plState.pl, meal.cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs)
+		plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues,
+			plState.pl, meal.cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs,
+			metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge))
 		if err != nil {
 			// This can not happen because queueSetCompleterForPL already approved this config
 			panic(fmt.Sprintf("%s from name=%q spec=%s", err, plName, fcfmt.Fmt(plState.pl.Spec)))
@@ -648,7 +796,7 @@ func (meal *cfgMeal) processOldPLsLocked() {
 		// priority level continues to get a concurrency
 		// allocation determined by all the share values in the
 		// regular way.
-		meal.shareSum += float64(plState.pl.Spec.Limited.AssuredConcurrencyShares)
+		meal.shareSum += float64(plState.pl.Spec.Limited.NominalConcurrencyShares)
 	}
 	meal.haveExemptPL = meal.haveExemptPL || plName == flowcontrol.PriorityLevelConfigurationNameExempt
 	meal.haveCatchAllPL = meal.haveCatchAllPL || plName == flowcontrol.PriorityLevelConfigurationNameCatchAll
@@ -666,32 +814,55 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() {
 			continue
 		}
 
+		limited := plState.pl.Spec.Limited
 		// The use of math.Ceil here means that the results might sum
 		// to a little more than serverConcurrencyLimit but the
 		// difference will be negligible.
-		concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(plState.pl.Spec.Limited.AssuredConcurrencyShares) / meal.shareSum))
-		metrics.UpdateSharedConcurrencyLimit(plName, concurrencyLimit)
+		concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(limited.NominalConcurrencyShares) / meal.shareSum))
+		var lendableCL, borrowingCL int
+		if limited.LendablePercent != nil {
+			lendableCL = int(math.Round(float64(concurrencyLimit) * float64(*limited.LendablePercent) / 100))
+		}
+		if limited.BorrowingLimitPercent != nil {
+			borrowingCL = int(math.Round(float64(concurrencyLimit) * float64(*limited.BorrowingLimitPercent) / 100))
+		} else {
+			borrowingCL = meal.cfgCtlr.serverConcurrencyLimit
+		}
+		metrics.SetPriorityLevelConfiguration(plName, concurrencyLimit, concurrencyLimit-lendableCL, concurrencyLimit+borrowingCL)
+		plState.seatDemandRatioedGauge.SetDenominator(float64(concurrencyLimit))
+		cfgChanged := plState.nominalCL != concurrencyLimit || plState.minCL != concurrencyLimit-lendableCL || plState.maxCL != concurrencyLimit+borrowingCL
+		plState.nominalCL = concurrencyLimit
+		plState.minCL = concurrencyLimit - lendableCL
+		plState.maxCL = concurrencyLimit + borrowingCL
 		meal.maxExecutingRequests += concurrencyLimit
 		var waitLimit int
-		if qCfg := plState.pl.Spec.Limited.LimitResponse.Queuing; qCfg != nil {
+		if qCfg := limited.LimitResponse.Queuing; qCfg != nil {
 			waitLimit = int(qCfg.Queues * qCfg.QueueLengthLimit)
 		}
 		meal.maxWaitingRequests += waitLimit
 
 		if plState.queues == nil {
-			klog.V(5).Infof("Introducing queues for priority level %q: config=%s, concurrencyLimit=%d, quiescing=%v (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, plState.quiescing, plState.pl.Spec.Limited.AssuredConcurrencyShares, meal.shareSum)
+			initialCL := concurrencyLimit - lendableCL/2
+			klog.V(2).Infof("Introducing queues for priority level %q: config=%s, nominalCL=%d, lendableCL=%d, borrowingCL=%d, currentCL=%d, quiescing=%v (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, lendableCL, borrowingCL, initialCL, plState.quiescing, plState.pl.Spec.Limited.NominalConcurrencyShares, meal.shareSum)
+			plState.seatDemandStats = seatDemandStats{}
+			plState.currentCL = initialCL
 		} else {
-			klog.V(5).Infof("Retaining queues for priority level %q: config=%s, concurrencyLimit=%d, quiescing=%v, numPending=%d (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, plState.quiescing, plState.numPending, plState.pl.Spec.Limited.AssuredConcurrencyShares, meal.shareSum)
+			logLevel := klog.Level(5)
+			if cfgChanged {
+				logLevel = 2
+			}
+			klog.V(logLevel).Infof("Retaining queues for priority level %q: config=%s, nominalCL=%d, lendableCL=%d, borrowingCL=%d, currentCL=%d, quiescing=%v, numPending=%d (shares=%v, shareSum=%v)", plName, fcfmt.Fmt(plState.pl.Spec), concurrencyLimit, lendableCL, borrowingCL, plState.currentCL, plState.quiescing, plState.numPending, plState.pl.Spec.Limited.NominalConcurrencyShares, meal.shareSum)
 		}
 		plState.queues = plState.qsCompleter.Complete(fq.DispatchingConfig{ConcurrencyLimit: concurrencyLimit})
 	}
+	meal.cfgCtlr.nominalCLSum = meal.maxExecutingRequests
+	meal.cfgCtlr.updateBorrowingLocked(false, meal.newPLStates)
 }
 
 // queueSetCompleterForPL returns an appropriate QueueSetCompleter for the
 // given priority level configuration. Returns nil if that config
 // does not call for limiting. Returns nil and an error if the given
 // object is malformed in a way that is a problem for this package.
-func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge) (fq.QueueSetCompleter, error) {
+func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) {
 	if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt) != (pl.Spec.Limited == nil) {
 		return nil, errors.New("broken union structure at the top")
 	}
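A worked pass through the derivation above, with invented values (not part of this commit): serverConcurrencyLimit=600, shareSum=300, and a level whose Limited spec has NominalConcurrencyShares=50, LendablePercent=50, and BorrowingLimitPercent unset:

// concurrencyLimit (nominalCL) = ceil(600 * 50 / 300)   = 100
// lendableCL                   = round(100 * 50 / 100)  = 50
// borrowingCL                  = serverConcurrencyLimit = 600 // nil means no explicit cap
// minCL     = 100 - 50   = 50
// maxCL     = 100 + 600  = 700
// initialCL = 100 - 50/2 = 75  // new queues start midway between minCL and nominalCL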
@@ -720,7 +891,7 @@ func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flow
 	if queues != nil {
 		qsc, err = queues.BeginConfigChange(qcQS)
 	} else {
-		qsc, err = qsf.BeginConstruction(qcQS, reqsIntPair, execSeatsObs)
+		qsc, err = qsf.BeginConstruction(qcQS, reqsIntPair, execSeatsObs, seatDemandGauge)
 	}
 	if err != nil {
 		err = fmt.Errorf("priority level %q has QueuingConfiguration %#+v, which is invalid: %w", pl.Name, qcAPI, err)
@@ -768,20 +939,26 @@ func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, re
 	labelValues := []string{proto.Name}
 	reqsGaugePair := metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues)
 	execSeatsObs := meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues)
-	qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, requestWaitLimit, reqsGaugePair, execSeatsObs)
+	seatDemandIntegrator := fq.NewNamedIntegrator(meal.cfgCtlr.clock, proto.Name)
+	seatDemandRatioedGauge := metrics.ApiserverSeatDemands.NewForLabelValuesSafe(0, 1, []string{proto.Name})
+	qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto,
+		requestWaitLimit, reqsGaugePair, execSeatsObs,
+		metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge))
 	if err != nil {
 		// This can not happen because proto is one of the mandatory
 		// objects and these are not erroneous
 		panic(err)
 	}
 	meal.newPLStates[proto.Name] = &priorityLevelState{
-		pl:            proto,
-		qsCompleter:   qsCompleter,
-		reqsGaugePair: reqsGaugePair,
-		execSeatsObs:  execSeatsObs,
+		pl:                     proto,
+		qsCompleter:            qsCompleter,
+		reqsGaugePair:          reqsGaugePair,
+		execSeatsObs:           execSeatsObs,
+		seatDemandIntegrator:   seatDemandIntegrator,
+		seatDemandRatioedGauge: seatDemandRatioedGauge,
 	}
 	if proto.Spec.Limited != nil {
-		meal.shareSum += float64(proto.Spec.Limited.AssuredConcurrencyShares)
+		meal.shareSum += float64(proto.Spec.Limited.NominalConcurrencyShares)
 	}
 }
@@ -869,12 +1046,14 @@ func (cfgCtlr *configController) maybeReap(plName string) {
 		klog.V(7).Infof("plName=%s, plState==nil", plName)
 		return
 	}
-	if plState.queues != nil {
-		useless := plState.quiescing && plState.numPending == 0 && plState.queues.IsIdle()
-		klog.V(7).Infof("plState.quiescing=%v, plState.numPending=%d, useless=%v", plState.quiescing, plState.numPending, useless)
-		if !useless {
-			return
-		}
+	if plState.queues == nil {
+		klog.V(7).Infof("plName=%s, plState.queues==nil", plName)
+		return
+	}
+	useless := plState.quiescing && plState.numPending == 0 && plState.queues.IsIdle()
+	klog.V(7).Infof("plState.quiescing=%v, plState.numPending=%d, useless=%v", plState.quiescing, plState.numPending, useless)
+	if !useless {
+		return
 	}
 	klog.V(3).Infof("Triggered API priority and fairness config reloading because priority level %s is undesired and idle", plName)
 	cfgCtlr.configQueue.Add(0)
@@ -919,3 +1098,12 @@ func hashFlowID(fsName, fDistinguisher string) uint64 {
 	hash.Sum(sum[:0])
 	return binary.LittleEndian.Uint64(sum[:8])
 }
+
+func relDiff(x, y float64) float64 {
+	diff := math.Abs(x - y)
+	den := math.Max(math.Abs(x), math.Abs(y))
+	if den == 0 {
+		return 0
+	}
+	return diff / den
+}
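relDiff supplies the 5% logging threshold used in updateBorrowingLocked: adjustments that move a level's CurrentCL by at least that much are logged at v(2) rather than v(4). For example (invented numbers):

// relDiff(95, 100) = |95-100| / max(|95|, |100|) = 5/100 = 0.05 // just reaches the v(2) threshold
// relDiff(0, 0)    = 0                                          // the den == 0 guard avoids dividing by zero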