Lines Matching +full:steps +full:- +full:per +full:- +full:period
1 // SPDX-License-Identifier: GPL-2.0
3 * Per Entity Load Tracking (PELT)
30 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
39 /* after bounds checking we can collapse to 32-bit */ in decay_load()
43 * As y^PERIOD = 1/2, we can combine in decay_load()
44 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD) in decay_load()
45 * With a look-up table which covers y^n (n<PERIOD) in decay_load()
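For readers without the tree handy, the decomposition above can be sketched in a few lines of stand-alone C. This is not decay_load() itself: the inverse table is filled at runtime with pow() instead of the in-tree runnable_avg_yN_inv constants, the helper names are invented, and a plain 64-bit multiply stands in for the kernel's widening mul_u64_u32_shr(), so results may differ by a few least-significant bits.

#include <stdint.h>
#include <stdio.h>
#include <math.h>

#define PELT_PERIOD 32			/* y^32 == 0.5 */

static uint32_t inv_y_n[PELT_PERIOD];	/* y^n scaled by 2^32, n = 0..31 */

static void build_table(void)
{
	for (int n = 0; n < PELT_PERIOD; n++) {
		double v = pow(0.5, (double)n / PELT_PERIOD) * 4294967296.0;

		/* clamp n == 0, which would not fit in 32 bits */
		inv_y_n[n] = (v >= 4294967295.0) ? 0xffffffffu : (uint32_t)v;
	}
}

/* val * y^n: halve once per full period, one table multiply for the rest */
static uint64_t decay_load_sketch(uint64_t val, unsigned int n)
{
	if (n >= PELT_PERIOD * 63)		/* would decay to 0 anyway */
		return 0;

	val >>= n / PELT_PERIOD;		       /* 1/2^(n/PERIOD)     */
	return (val * inv_y_n[n % PELT_PERIOD]) >> 32; /* ... * y^(n%PERIOD) */
}

int main(void)
{
	build_table();
	printf("decay(1024, 32 periods) = %llu (~1024/2)\n",
	       (unsigned long long)decay_load_sketch(1024, 32));
	printf("decay(47742, 1 period)  = %llu (~LOAD_AVG_MAX - 1024)\n",
	       (unsigned long long)decay_load_sketch(47742, 1));
	return 0;
}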
68  * c2 = 1024 \Sum_{n=1}^{p-1} y^n in __accumulate_pelt_segments()
73  *    = 1024 ( \Sum_{n=0}^{inf} y^n - \Sum_{n=p}^{inf} y^n - y^0 ) in __accumulate_pelt_segments()
76 c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024; in __accumulate_pelt_segments()
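The closed form on line 76 works because LOAD_AVG_MAX is (the fixed-point version of) the full series 1024 * \Sum_{n=0}^{inf} y^n. Below is a quick floating-point check of that identity, using the analytic limit M = 1024/(1 - y) (~47788) in place of the kernel's rounded constant 47742; the two sides match exactly here and only up to rounding in the kernel's integer math.

#include <stdio.h>
#include <math.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* y^32 == 0.5 */
	const double M = 1024.0 / (1.0 - y);	/* full series, ~47788 */

	for (int p = 1; p <= 8; p++) {
		double direct = 0.0;

		/* left-hand side: 1024 * sum_{n=1..p-1} y^n */
		for (int n = 1; n <= p - 1; n++)
			direct += 1024.0 * pow(y, n);

		/* right-hand side: M - M*y^p - 1024, i.e. the c2 line above */
		double closed = M - M * pow(y, p) - 1024.0;

		printf("p=%d  direct=%9.3f  closed=%9.3f\n", p, direct, closed);
	}
	return 0;
}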
83 * of the last (incomplete) period, d2 the span of full periods and d3
84 * the remainder of the (incomplete) current period.
89 * |<->|<----------------->|<--->|
90 * ... |---x---|------| ... |------|-----x (now)
92 * u' = (u + d1) y^p + 1024 \Sum_{n=1}^{p-1} y^n + d3 y^0
98 *    = u y^p   (Step 1)   +   d1 y^p + 1024 \Sum_{n=1}^{p-1} y^n + d3 y^0   (Step 2)
106 u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */ in accumulate_sum()
109 delta += sa->period_contrib; in accumulate_sum()
110 periods = delta / 1024; /* A period is 1024us (~1ms) */ in accumulate_sum()
113 * Step 1: decay old *_sum if we crossed period boundaries. in accumulate_sum()
116 sa->load_sum = decay_load(sa->load_sum, periods); in accumulate_sum()
117 sa->runnable_sum = in accumulate_sum()
118 decay_load(sa->runnable_sum, periods); in accumulate_sum()
119 sa->util_sum = decay_load((u64)(sa->util_sum), periods); in accumulate_sum()
137 1024 - sa->period_contrib, delta); in accumulate_sum()
140 sa->period_contrib = delta; in accumulate_sum()
143 sa->load_sum += load * contrib; in accumulate_sum()
145 sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT; in accumulate_sum()
147 sa->util_sum += contrib << SCHED_CAPACITY_SHIFT; in accumulate_sum()
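Putting the accumulate_sum() fragments above together, the same accrual can be mirrored in user space. This is a deliberately simplified toy, not the kernel code: one load-style signal, load treated as a 0/1 flag instead of a weight, floating-point decay in place of the fixed-point table, and no frequency or capacity scaling; struct toy_avg and toy_accumulate() are invented names.

#include <stdio.h>
#include <stdint.h>
#include <math.h>

struct toy_avg {
	uint32_t period_contrib;	/* used part of the current 1024us period */
	double load_sum;		/* geometrically decayed running sum */
};

static const double Y = 0.97857206208770;	/* y, chosen so y^32 == 0.5 */

static void toy_accumulate(struct toy_avg *sa, uint64_t delta_us, int load)
{
	double contrib = (double)delta_us;	/* p == 0 -> delta < 1024 */
	uint64_t periods;

	delta_us += sa->period_contrib;
	periods = delta_us / 1024;		/* full period boundaries crossed */

	if (periods) {
		/* Step 1: decay the old sum across the crossed periods */
		sa->load_sum *= pow(Y, (double)periods);

		/* d1: remainder of the old period, decayed by y^p */
		double d1 = 1024.0 - sa->period_contrib;
		/* d2: the full periods in between, 1024 * sum_{n=1..p-1} y^n */
		double d2 = 1024.0 * (Y - pow(Y, (double)periods)) / (1.0 - Y);
		/* d3: the already elapsed part of the new, incomplete period */
		double d3 = (double)(delta_us % 1024);

		contrib = d1 * pow(Y, (double)periods) + d2 + d3;
	}
	sa->period_contrib = delta_us % 1024;

	/* Step 2: accrue the contribution only if the entity carried load */
	if (load)
		sa->load_sum += contrib;
}

int main(void)
{
	struct toy_avg sa = { 0, 0.0 };

	/* always runnable, updated exactly once per 1024us period */
	for (int i = 0; i < 1000; i++)
		toy_accumulate(&sa, 1024, 1);

	/* saturates near 1024*y/(1-y) ~ 46764, the analytic counterpart of
	 * the fixed-point LOAD_AVG_MAX*y == LOAD_AVG_MAX - 1024 bound */
	printf("saturated load_sum = %.0f\n", sa.load_sum);
	return 0;
}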
154 * coefficients of a geometric series. To do this we sub-divide our runnable
156 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
158 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
164 * We then designate the fractions u_i as our coefficients, yielding the
168 * We choose y based on the width of a reasonable scheduling period, fixing:
175 * When a period "rolls over" and we have a new u_0`, multiplying the previous
178 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
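The "rolls over" update is just Horner's rule applied to the series above. Here is a toy check, with invented u_i values and plain doubles, that the incremental form u_0 + y * (previous sum) agrees with recomputing \Sum u_i y^i from the full history:

#include <stdio.h>
#include <math.h>

#define PERIODS 64

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* y^32 == 0.5 */
	double u[PERIODS] = { 0 };		/* u[0] is the newest period */
	double incremental = 0.0;
	double direct = 0.0;

	for (int p = 0; p < PERIODS; p++) {
		/* invent a runnable fraction for the new period */
		double u0 = (p % 3) ? 0.25 : 1.0;

		/* shift history: u_i --> u_{i+1} */
		for (int i = PERIODS - 1; i > 0; i--)
			u[i] = u[i - 1];
		u[0] = u0;

		/* the PELT-style rollover: multiply the old sum by y, add u_0 */
		incremental = u0 + y * incremental;
	}

	/* recompute the same value the long way from the full history */
	for (int i = 0; i < PERIODS; i++)
		direct += u[i] * pow(y, i);

	printf("incremental = %.9f, direct = %.9f\n", incremental, direct);
	return 0;
}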
186 delta = now - sa->last_update_time; in ___update_load_sum()
192 sa->last_update_time = now; in ___update_load_sum()
204 sa->last_update_time += delta << 10; in ___update_load_sum()
209 * se has already been dequeued but cfs_rq->curr still points to it. in ___update_load_sum()
222 * accrues by two steps: in ___update_load_sum()
225 * crossed period boundaries, finish. in ___update_load_sum()
242 * LOAD_AVG_MAX*y + sa->period_contrib
246 * LOAD_AVG_MAX - 1024 + sa->period_contrib
248 * because LOAD_AVG_MAX*y == LOAD_AVG_MAX-1024
265 sa->load_avg = div_u64(load * sa->load_sum, divider); in ___update_load_avg()
266 sa->runnable_avg = div_u64(sa->runnable_sum, divider); in ___update_load_avg()
267 WRITE_ONCE(sa->util_avg, sa->util_sum / divider); in ___update_load_avg()
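As a sanity check on the divider quoted just above, here is a quick floating-point comparison using the kernel constant LOAD_AVG_MAX = 47742 and y = 2^(-1/32). In real arithmetic LOAD_AVG_MAX*y and LOAD_AVG_MAX - 1024 differ by about one unit (the equality is exact only in the kernel's fixed-point table), so the ratio of a saturated sum to the divider sits at ~1.0:

#include <stdio.h>
#include <math.h>

#define LOAD_AVG_MAX 47742	/* kernel's fixed-point maximum of the series */

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);

	printf("LOAD_AVG_MAX * y    = %.1f\n", LOAD_AVG_MAX * y);
	printf("LOAD_AVG_MAX - 1024 = %d\n", LOAD_AVG_MAX - 1024);

	/* saturated *_sum vs. divider at a few positions in the segment */
	for (int contrib = 0; contrib <= 1024; contrib += 512) {
		double max_sum = LOAD_AVG_MAX * y + contrib;
		double divider = LOAD_AVG_MAX - 1024 + contrib;

		printf("period_contrib=%4d  max_sum/divider = %.5f\n",
		       contrib, max_sum / divider);
	}
	return 0;
}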
274 * se_weight() = se->load.weight
278 * se_weight() = tg->weight * grq->load_avg / tg->load_avg
279 * se_runnable() = grq->h_nr_runnable
281 * runnable_sum = se_runnable() * runnable = grq->runnable_sum
289 * runnable_sum = \Sum se->avg.runnable_sum
290 * runnable_avg = \Sum se->avg.runnable_avg
292 * load_sum = \Sum se_weight(se) * se->avg.load_sum
293 * load_avg = \Sum se->avg.load_avg
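As a small illustration of the group-entity rule quoted above, se_weight() = tg->weight * grq->load_avg / tg->load_avg, here is a toy with invented per-CPU load_avg numbers; the point is only that the per-CPU shares add back up to roughly tg->weight.

#include <stdio.h>

int main(void)
{
	/* invented numbers: one task group spread over three CPUs */
	const unsigned long tg_weight = 1024;			 /* tg->weight */
	const unsigned long grq_load_avg[3] = { 700, 200, 100 }; /* per-CPU grq->load_avg */
	unsigned long tg_load_avg = 0, total = 0;

	/* tg->load_avg roughly tracks the sum of the per-CPU grq->load_avg */
	for (int cpu = 0; cpu < 3; cpu++)
		tg_load_avg += grq_load_avg[cpu];

	for (int cpu = 0; cpu < 3; cpu++) {
		unsigned long w = tg_weight * grq_load_avg[cpu] / tg_load_avg;

		printf("cpu%d: group se weight = %lu\n", cpu, w);
		total += w;
	}
	printf("sum of shares = %lu (tg->weight up to rounding)\n", total);
	return 0;
}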
298 if (___update_load_sum(now, &se->avg, 0, 0, 0)) { in __update_load_avg_blocked_se()
299 ___update_load_avg(&se->avg, se_weight(se)); in __update_load_avg_blocked_se()
309 if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se), in __update_load_avg_se()
310 cfs_rq->curr == se)) { in __update_load_avg_se()
312 ___update_load_avg(&se->avg, se_weight(se)); in __update_load_avg_se()
313 cfs_se_util_change(&se->avg); in __update_load_avg_se()
323 if (___update_load_sum(now, &cfs_rq->avg, in __update_load_avg_cfs_rq()
324 scale_load_down(cfs_rq->load.weight), in __update_load_avg_cfs_rq()
325 cfs_rq->h_nr_runnable, in __update_load_avg_cfs_rq()
326 cfs_rq->curr != NULL)) { in __update_load_avg_cfs_rq()
328 ___update_load_avg(&cfs_rq->avg, 1); in __update_load_avg_cfs_rq()
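The three wrappers above differ only in the (load, runnable, running) triple passed to ___update_load_sum(): (0, 0, 0) for a blocked se, (!!on_rq, se_runnable(se), cfs_rq->curr == se) for a queued se, and (weight, h_nr_runnable, curr != NULL) for the cfs_rq itself. Below is a floating-point toy (no capacity scaling, invented names) showing the practical consequence, namely that the all-zero triple can only decay the sums:

#include <stdio.h>

struct toy_sums { double load_sum, runnable_sum, util_sum; };

static const double Y = 0.97857206208770;	/* decay per 1024us period */

/* advance one full period with the given (load, runnable, running) inputs */
static void toy_update_load_sum(struct toy_sums *s, double load,
				double runnable, int running)
{
	s->load_sum     = s->load_sum     * Y + load * 1024.0;
	s->runnable_sum = s->runnable_sum * Y + runnable * 1024.0;
	s->util_sum     = s->util_sum     * Y + (running ? 1024.0 : 0.0);
}

int main(void)
{
	struct toy_sums blocked_se = { 4000, 4000, 4000 };
	struct toy_sums running_se = { 4000, 4000, 4000 };

	for (int period = 0; period < 32; period++) {
		toy_update_load_sum(&blocked_se, 0, 0, 0); /* blocked se: decay only */
		toy_update_load_sum(&running_se, 1, 1, 1); /* on-CPU se: decay + accrue */
	}

	/* after 32 periods the blocked sums have halved (y^32 == 0.5) */
	printf("blocked util_sum = %.0f, running util_sum = %.0f\n",
	       blocked_se.util_sum, running_se.util_sum);
	return 0;
}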
339 * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
349 if (___update_load_sum(now, &rq->avg_rt, in update_rt_rq_load_avg()
354 ___update_load_avg(&rq->avg_rt, 1); in update_rt_rq_load_avg()
365 * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
375 if (___update_load_sum(now, &rq->avg_dl, in update_dl_rq_load_avg()
380 ___update_load_avg(&rq->avg_dl, 1); in update_dl_rq_load_avg()
392 * load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked
400 * "delta capacity" = actual capacity -
406 if (___update_load_sum(now, &rq->avg_hw, in update_hw_load_avg()
410 ___update_load_avg(&rq->avg_hw, 1); in update_hw_load_avg()
423 * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
451 * We can safely remove running from rq->clock because in update_irq_load_avg()
452 * rq->clock += delta with delta >= running in update_irq_load_avg()
454 ret = ___update_load_sum(rq->clock - running, &rq->avg_irq, in update_irq_load_avg()
458 ret += ___update_load_sum(rq->clock, &rq->avg_irq, in update_irq_load_avg()
464 ___update_load_avg(&rq->avg_irq, 1); in update_irq_load_avg()
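The two back-to-back ___update_load_sum() calls above split the window since the last update at (rq->clock - running): the first call only decays the signal, the second accrues the final 'running' microseconds as fully busy. Here is a toy reproduction of that split, in floating point and with a much cruder accrual than accumulate_sum(); struct toy_avg and toy_update() are invented names.

#include <stdio.h>
#include <stdint.h>
#include <math.h>

struct toy_avg {
	uint64_t last_update_time;	/* us */
	uint32_t period_contrib;
	double sum;
};

static const double Y = 0.97857206208770;	/* y = 2^(-1/32) */

/* advance the signal to 'now'; contribute only when 'running' is set */
static void toy_update(struct toy_avg *sa, uint64_t now, int running)
{
	uint64_t delta = now - sa->last_update_time;

	sa->last_update_time = now;
	delta += sa->period_contrib;
	sa->sum *= pow(Y, (double)(delta / 1024));	/* decay over full periods */
	if (running)
		sa->sum += (double)(delta - sa->period_contrib); /* crude accrual */
	sa->period_contrib = delta % 1024;
}

int main(void)
{
	struct toy_avg irq = { 0, 0, 0.0 };
	uint64_t clock = 0;

	/* every 10ms, report that the last 1ms was spent in interrupts */
	for (int i = 0; i < 1000; i++) {
		uint64_t running_us = 1000;

		clock += 10000;
		toy_update(&irq, clock - running_us, 0); /* idle part: decay only */
		toy_update(&irq, clock, 1);		 /* busy part: accrue */
	}

	/* a 1-in-10 duty cycle settles far below the ~46.8k always-busy ceiling */
	printf("steady-state irq sum = %.0f\n", irq.sum);
	return 0;
}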
480 const struct sched_class *curr_class = rq->donor->sched_class; in update_other_load_avgs()