Lines Matching "steps-per-period"

1 // SPDX-License-Identifier: GPL-2.0
3 * Per Entity Load Tracking (PELT)
29 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
38 /* after bounds checking we can collapse to 32-bit */ in decay_load()
42 * As y^PERIOD = 1/2, we can combine in decay_load()
43 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD) in decay_load()
44 * With a look-up table which covers y^n (n<PERIOD) in decay_load()
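
The decay_load() fragments above implement constant-time decay: because y^32 = 1/2, decaying by n periods is n/32 halvings (a right shift) followed by a table lookup for the remaining y^(n%32) factor. Below is a minimal user-space sketch of that split; it is not the kernel code. The kernel hard-codes the 32 fixed-point constants (runnable_avg_yN_inv[]) instead of computing them, and uses mul_u64_u32_shr() for the widening multiply.

	/* Sketch only: a user-space approximation of decay_load(). */
	#include <stdint.h>
	#include <stdio.h>
	#include <math.h>

	#define LOAD_AVG_PERIOD	32

	static uint32_t y_inv[LOAD_AVG_PERIOD];		/* y^n scaled by 2^32, n < 32 */

	static void build_table(void)
	{
		double y = pow(0.5, 1.0 / LOAD_AVG_PERIOD);	/* so that y^32 == 0.5 */

		y_inv[0] = 0xffffffffu;				/* y^0 ~= 1.0 in fixed point */
		for (int n = 1; n < LOAD_AVG_PERIOD; n++)
			y_inv[n] = (uint32_t)(pow(y, n) * 4294967296.0);
	}

	static uint64_t decay_load(uint64_t val, uint64_t n)
	{
		if (n > LOAD_AVG_PERIOD * 63)			/* decayed to nothing */
			return 0;

		/* y^n = 1/2^(n/PERIOD) * y^(n%PERIOD): shift for the halvings ... */
		val >>= n / LOAD_AVG_PERIOD;
		/* ... then one fixed-point multiply for the remainder
		 * (unsigned __int128 is a gcc/clang extension; the kernel
		 * uses mul_u64_u32_shr() here). */
		return (uint64_t)(((unsigned __int128)val * y_inv[n % LOAD_AVG_PERIOD]) >> 32);
	}

	int main(void)
	{
		build_table();
		/* Decaying by one full set of 32 periods roughly halves the value. */
		printf("%llu\n", (unsigned long long)decay_load(47742, 32));
		return 0;
	}

Built with "cc -O2 sketch.c -lm" this prints 23870, i.e. about half of LOAD_AVG_MAX (47742), as expected for 32 periods of decay.
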
67 * c2 = 1024 \Sum_{n=1}^{p-1} y^n in __accumulate_pelt_segments()
72 * = 1024 ( \Sum_{n=0}^{inf} y^n - \Sum_{n=p}^{inf} y^n - y^0 ) in __accumulate_pelt_segments()
75 c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024; in __accumulate_pelt_segments()
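
The closed form on source line 75 falls out of LOAD_AVG_MAX being (approximately) 1024 times the infinite geometric series \Sum_{n>=0} y^n. A worked expansion, in the same notation as the comments above:

	1024 \Sum_{n=1}^{p-1} y^n = 1024 ( \Sum_{n=0}^{inf} y^n - \Sum_{n=p}^{inf} y^n - y^0 )
	                          = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, p) - 1024

because \Sum_{n=p}^{inf} y^n = y^p * \Sum_{n=0}^{inf} y^n is exactly what decay_load(LOAD_AVG_MAX, p) computes, and 1024 * y^0 = 1024. For p = 2, for instance, the sum is the single term 1024 * y ~= 1002, and the closed form agrees up to fixed-point rounding.
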
82 * of the last (incomplete) period, d2 the span of full periods and d3
83 * the remainder of the (incomplete) current period.
88 * |<->|<----------------->|<--->|
89 * ... |---x---|------| ... |------|-----x (now)
91 * u' = (u + d1) y^p + 1024 \Sum_{n=1}^{p-1} y^n + d3 y^0
97 * = u y^p + ( d1 y^p + 1024 \Sum_{n=1}^{p-1} y^n + d3 y^0 )
105 u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */ in accumulate_sum()
108 delta += sa->period_contrib; in accumulate_sum()
109 periods = delta / 1024; /* A period is 1024us (~1ms) */ in accumulate_sum()
112 * Step 1: decay old *_sum if we crossed period boundaries. in accumulate_sum()
115 sa->load_sum = decay_load(sa->load_sum, periods); in accumulate_sum()
116 sa->runnable_sum = in accumulate_sum()
117 decay_load(sa->runnable_sum, periods); in accumulate_sum()
118 sa->util_sum = decay_load((u64)(sa->util_sum), periods); in accumulate_sum()
136 1024 - sa->period_contrib, delta); in accumulate_sum()
139 sa->period_contrib = delta; in accumulate_sum()
142 sa->load_sum += load * contrib; in accumulate_sum()
144 sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT; in accumulate_sum()
146 sa->util_sum += contrib << SCHED_CAPACITY_SHIFT; in accumulate_sum()
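
Putting the accumulate_sum() fragments together, a worked example (numbers made up, frequency/CPU-capacity invariance ignored): suppose sa->period_contrib is 300us and 2000us of time is being accounted. Then delta becomes 2300, periods = 2, so Step 1 decays load_sum/runnable_sum/util_sum by y^2. Step 2 assembles contrib from the three segments in the diagram above: d1 = 1024 - 300 = 724us of the old period decayed by y^2, one full middle period contributing 1024 * y, and d3 = 2300 % 1024 = 252us of the new period, giving contrib ~= 724*y^2 + 1024*y + 252 ~= 1947. Finally period_contrib is set to 252, load_sum and runnable_sum grow by load and runnable times contrib, and util_sum grows by contrib << SCHED_CAPACITY_SHIFT only while the entity was actually running.
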
153 * coefficients of a geometric series. To do this we sub-divide our runnable
155 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
157 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
163 * We then designate the fractions u_i as our co-efficients, yielding the
167 * We choose y based on the width of a reasonable scheduling period, fixing:
174 * When a period "rolls over" and we have new u_0`, multiplying the previous
177 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
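
The rollover rule described here (multiply the previous sum by y, then add the new u_0) is equivalent to recomputing the whole series. A small stand-alone check with made-up per-period values u[]; this is an illustration, not kernel code:

	#include <stdio.h>
	#include <math.h>

	int main(void)
	{
		double y = pow(0.5, 1.0 / 32.0);		/* y^32 == 0.5 */
		double u[5] = { 300, 1024, 0, 512, 1024 };	/* hypothetical us per period, u[0] newest */
		double rolled = 0.0, direct = 0.0;

		/* Feed periods oldest-first, applying  sum' = u_0 + y * sum  at each rollover. */
		for (int i = 4; i >= 0; i--)
			rolled = u[i] + y * rolled;

		/* Recompute  u_0 + u_1*y + u_2*y^2 + ...  from scratch. */
		for (int i = 0; i < 5; i++)
			direct += u[i] * pow(y, i);

		printf("rolled=%.3f direct=%.3f\n", rolled, direct);
		return 0;
	}

Both loops print the same value, which is why the scheduler never has to revisit old periods.
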
185 delta = now - sa->last_update_time; in ___update_load_sum()
191 sa->last_update_time = now; in ___update_load_sum()
203 sa->last_update_time += delta << 10; in ___update_load_sum()
208 * se has already been dequeued but cfs_rq->curr still points to it. in ___update_load_sum()
221 * accrues by two steps: in ___update_load_sum()
224 * crossed period boundaries, finish. in ___update_load_sum()
241 * LOAD_AVG_MAX*y + sa->period_contrib
245 * LOAD_AVG_MAX - 1024 + sa->period_contrib
247 * because LOAD_AVG_MAX*y == LOAD_AVG_MAX-1024
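
The identity quoted above follows from LOAD_AVG_MAX being the fixed point of the rollover rule with a full 1024us contribution every period: LOAD_AVG_MAX = 1024 + y * LOAD_AVG_MAX, hence LOAD_AVG_MAX * y = LOAD_AVG_MAX - 1024. Numerically, with LOAD_AVG_MAX = 47742 both sides come out at about 46718 (up to fixed-point rounding), which is why the divider used below is LOAD_AVG_MAX - 1024 + sa->period_contrib.
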
264 sa->load_avg = div_u64(load * sa->load_sum, divider); in ___update_load_avg()
265 sa->runnable_avg = div_u64(sa->runnable_sum, divider); in ___update_load_avg()
266 WRITE_ONCE(sa->util_avg, sa->util_sum / divider); in ___update_load_avg()
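
With that divider, the three assignments turn the decayed sums into averages on a fixed scale. Roughly speaking (frequency and CPU-capacity invariance ignored): util_sum accumulates microseconds of running time shifted left by SCHED_CAPACITY_SHIFT, so an always-running entity saturates util_sum near divider << 10 and util_avg near 1024, while one running half the time converges to about 512. load_avg additionally folds in the entity weight passed as 'load'. The WRITE_ONCE() on util_avg pairs with lockless readers of that field elsewhere in the scheduler.
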
273 * se_weight() = se->load.weight
277 * se_weight() = tg->weight * grq->load_avg / tg->load_avg
278 * se_runnable() = grq->h_nr_runnable
280 * runnable_sum = se_runnable() * runnable = grq->runnable_sum
288 * runnable_sum = \Sum se->avg.runnable_sum
289 * runnable_avg = \Sum se->avg.runnable_avg
291 * load_sum = \Sum se_weight(se) * se->avg.load_sum
292 * load_avg = \Sum se->avg.load_avg
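
A quick worked reading of these rules, with illustrative numbers: for a nice-0 task entity (se_weight ~= 1024) that has been runnable roughly half of recent time, load_sum converges to about half of the PELT divider, so load_avg = se_weight(se) * load_sum / divider ~= 512. The cfs_rq lines then state that the runqueue-level signals are simply the sums of the corresponding per-entity signals.
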
297 if (___update_load_sum(now, &se->avg, 0, 0, 0)) { in __update_load_avg_blocked_se()
298 ___update_load_avg(&se->avg, se_weight(se)); in __update_load_avg_blocked_se()
308 if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se), in __update_load_avg_se()
309 cfs_rq->curr == se)) { in __update_load_avg_se()
311 ___update_load_avg(&se->avg, se_weight(se)); in __update_load_avg_se()
312 cfs_se_util_change(&se->avg); in __update_load_avg_se()
322 if (___update_load_sum(now, &cfs_rq->avg, in __update_load_avg_cfs_rq()
323 scale_load_down(cfs_rq->load.weight), in __update_load_avg_cfs_rq()
324 cfs_rq->h_nr_runnable, in __update_load_avg_cfs_rq()
325 cfs_rq->curr != NULL)) { in __update_load_avg_cfs_rq()
327 ___update_load_avg(&cfs_rq->avg, 1); in __update_load_avg_cfs_rq()
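
The three fair-class helpers above differ only in the (load, runnable, running) inputs they feed to ___update_load_sum(); summarising the quoted call sites:

	helper                          load                            runnable            running
	__update_load_avg_blocked_se    0                               0                   0
	__update_load_avg_se            !!se->on_rq                     se_runnable(se)     cfs_rq->curr == se
	__update_load_avg_cfs_rq        scale_load_down(load.weight)    h_nr_runnable       curr != NULL

A blocked entity therefore only decays, while a queued one keeps accumulating.
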
338 * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
348 if (___update_load_sum(now, &rq->avg_rt, in update_rt_rq_load_avg()
353 ___update_load_avg(&rq->avg_rt, 1); in update_rt_rq_load_avg()
364 * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
374 if (___update_load_sum(now, &rq->avg_dl, in update_dl_rq_load_avg()
379 ___update_load_avg(&rq->avg_dl, 1); in update_dl_rq_load_avg()
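
For the rt and dl runqueue signals there is no per-entity tracking at all: ___update_load_avg() is called with load = 1 and, in the surrounding source (not all of it shown in this listing), the same 'running' flag drives all three inputs, so the resulting util_avg simply measures the fraction of time the CPU spent on rt/dl work on the usual [0..1024] scale. A CPU spending about a quarter of its time in rt tasks converges to rq->avg_rt.util_avg of roughly 256.
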
391 * load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked
399 * "delta capacity" = actual capacity -
405 if (___update_load_sum(now, &rq->avg_hw, in update_hw_load_avg()
409 ___update_load_avg(&rq->avg_hw, 1); in update_hw_load_avg()
422 * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
450 * We can safely remove running from rq->clock because in update_irq_load_avg()
451 * rq->clock += delta with delta >= running in update_irq_load_avg()
453 ret = ___update_load_sum(rq->clock - running, &rq->avg_irq, in update_irq_load_avg()
457 ret += ___update_load_sum(rq->clock, &rq->avg_irq, in update_irq_load_avg()
463 ___update_load_avg(&rq->avg_irq, 1); in update_irq_load_avg()
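
The irq/steal-time signal is updated in two steps because interrupt time must not be charged to whatever was running underneath it: the first ___update_load_sum() call advances the signal to rq->clock - running with all-zero inputs, so that span only decays, while the second call covers the final 'running' nanoseconds as busy time (the argument lines that follow in the source, not part of this listing, pass 0,0,0 and then 1,1,1). The comment above justifies rq->clock - running as a valid intermediate timestamp. As a rough worked example: if 1000us elapsed since the last update of which 200us were spent in irq context, the first call decays across 800us and the second accumulates 200us; under a steady 20% irq load, rq->avg_irq.util_avg converges to about 0.2 * 1024 ~= 205. Summing the two return values means that either call crossing a period boundary triggers the ___update_load_avg() recomputation.
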
479 const struct sched_class *curr_class = rq->donor->sched_class; in update_other_load_avgs()