1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Pressure stall information for CPU, memory and IO
4 *
5 * Copyright (c) 2018 Facebook, Inc.
6 * Author: Johannes Weiner <hannes@cmpxchg.org>
7 *
8 * Polling support by Suren Baghdasaryan <surenb@google.com>
9 * Copyright (c) 2018 Google, Inc.
10 *
11 * When CPU, memory and IO are contended, tasks experience delays that
12 * reduce throughput and introduce latencies into the workload. Memory
13 * and IO contention, in addition, can cause a full loss of forward
14 * progress in which the CPU goes idle.
15 *
16 * This code aggregates individual task delays into resource pressure
17 * metrics that indicate problems with both workload health and
18 * resource utilization.
19 *
20 * Model
21 *
22 * The time in which a task can execute on a CPU is our baseline for
23 * productivity. Pressure expresses the amount of time in which this
24 * potential cannot be realized due to resource contention.
25 *
26 * This concept of productivity has two components: the workload and
27 * the CPU. To measure the impact of pressure on both, we define two
28 * contention states for a resource: SOME and FULL.
29 *
30 * In the SOME state of a given resource, one or more tasks are
31 * delayed on that resource. This affects the workload's ability to
32 * perform work, but the CPU may still be executing other tasks.
33 *
34 * In the FULL state of a given resource, all non-idle tasks are
35 * delayed on that resource such that nobody is advancing and the CPU
36 * goes idle. This leaves both workload and CPU unproductive.
37 *
38 * SOME = nr_delayed_tasks != 0
39 * FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
40 *
41 * What it means for a task to be productive is defined differently
42 * for each resource. For IO, productive means a running task. For
43 * memory, productive means a running task that isn't a reclaimer. For
44 * CPU, productive means an on-CPU task.
45 *
46 * Naturally, the FULL state doesn't exist for the CPU resource at the
47 * system level, but exists at the cgroup level. At the cgroup level,
48 * FULL means all non-idle tasks in the cgroup are delayed on the CPU
49 * resource which is being used by others outside of the cgroup or
50 * throttled by the cgroup cpu.max configuration.
51 *
52 * The percentage of wall clock time spent in those compound stall
53 * states gives pressure numbers between 0 and 100 for each resource,
54 * where the SOME percentage indicates workload slowdowns and the FULL
55 * percentage indicates reduced CPU utilization:
56 *
57 * %SOME = time(SOME) / period
58 * %FULL = time(FULL) / period
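 *
 * For example, if during a 2s period at least one task was delayed on
 * memory for a total of 0.5s of wall clock time while other tasks kept
 * the CPU busy throughout, that period reports
 * %SOME(memory) = 0.5s / 2s = 25% and %FULL(memory) = 0%.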
59 *
60 * Multiple CPUs
61 *
62 * The more tasks and available CPUs there are, the more work can be
63 * performed concurrently. This means that the potential that can go
64 * unrealized due to resource contention *also* scales with non-idle
65 * tasks and CPUs.
66 *
67 * Consider a scenario where 257 number crunching tasks are trying to
68 * run concurrently on 256 CPUs. If we simply aggregated the task
69 * states, we would have to conclude a CPU SOME pressure number of
70 * 100%, since *somebody* is waiting on a runqueue at all
71 * times. However, that is clearly not the amount of contention the
72 * workload is experiencing: only one out of 256 possible execution
73 * threads will be contended at any given time, or about 0.4%.
74 *
75 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
76 * given time *one* of the tasks is delayed due to a lack of memory.
77 * Again, looking purely at the task state would yield a memory FULL
78 * pressure number of 0%, since *somebody* is always making forward
79 * progress. But again this wouldn't capture the amount of execution
80 * potential lost, which is 1 out of 4 CPUs, or 25%.
81 *
82 * To calculate wasted potential (pressure) with multiple processors,
83 * we have to base our calculation on the number of non-idle tasks in
84 * conjunction with the number of available CPUs, which is the number
85 * of potential execution threads. SOME then becomes the proportion of
86 * delayed tasks to possible threads, and FULL is the share of possible
87 * threads that are unproductive due to delays:
88 *
89 * threads = min(nr_nonidle_tasks, nr_cpus)
90 * SOME = min(nr_delayed_tasks / threads, 1)
91 * FULL = (threads - min(nr_productive_tasks, threads)) / threads
92 *
93 * For the 257 number crunchers on 256 CPUs, this yields:
94 *
95 * threads = min(257, 256)
96 * SOME = min(1 / 256, 1) = 0.4%
97 * FULL = (256 - min(256, 256)) / 256 = 0%
98 *
99 * For the 1 out of 4 memory-delayed tasks, this yields:
100 *
101 * threads = min(4, 4)
102 * SOME = min(1 / 4, 1) = 25%
103 * FULL = (4 - min(3, 4)) / 4 = 25%
104 *
105 * [ Substitute nr_cpus with 1, and you can see that it's a natural
106 * extension of the single-CPU model. ]
107 *
108 * Implementation
109 *
110 * To assess the precise time spent in each such state, we would have
111 * to freeze the system on task changes and start/stop the state
112 * clocks accordingly. Obviously that doesn't scale in practice.
113 *
114 * Because the scheduler aims to distribute the compute load evenly
115 * among the available CPUs, we can track task state locally to each
116 * CPU and, at much lower frequency, extrapolate the global state for
117 * the cumulative stall times and the running averages.
118 *
119 * For each runqueue, we track:
120 *
121 * tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
122 * tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
123 * tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
124 *
125 * and then periodically aggregate:
126 *
127 * tNONIDLE = sum(tNONIDLE[i])
128 *
129 * tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
130 * tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
131 *
132 * %SOME = tSOME / period
133 * %FULL = tFULL / period
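 *
 * For example, over a 2s period in which CPU0 was non-idle the full 2s
 * (1s of that in SOME), CPU1 was non-idle for 1s (never in SOME) and
 * CPU2 was idle throughout:
 *
 *	tNONIDLE = 2s + 1s = 3s
 *	tSOME    = (1s * 2 + 0s * 1) / 3 = 0.67s
 *	%SOME    = 0.67s / 2s = 33%
 *
 * so the idle CPU contributes nothing, and CPU0's stall time carries
 * twice the weight of CPU1's, since it was busy for twice as long.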
134 *
135 * This gives us an approximation of pressure that is practical
136 * cost-wise, yet way more sensitive and accurate than periodic
137 * sampling of the aggregate task states would be.
138 */
139 #include <linux/sched/clock.h>
140 #include <linux/workqueue.h>
141 #include <linux/psi.h>
142 #include "sched.h"
143
144 static int psi_bug __read_mostly;
145
146 DEFINE_STATIC_KEY_FALSE(psi_disabled);
147 static DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
148
149 #ifdef CONFIG_PSI_DEFAULT_DISABLED
150 static bool psi_enable;
151 #else
152 static bool psi_enable = true;
153 #endif
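
/* Boot with "psi=0"/"psi=1" (any kstrtobool() value) to override the default. */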
154 static int __init setup_psi(char *str)
155 {
156 return kstrtobool(str, &psi_enable) == 0;
157 }
158 __setup("psi=", setup_psi);
159
160 /* Running averages - we need to be higher-res than loadavg */
161 #define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
162 #define EXP_10s 1677 /* 1/exp(2s/10s) as fixed-point */
163 #define EXP_60s 1981 /* 1/exp(2s/60s) */
164 #define EXP_300s 2034 /* 1/exp(2s/300s) */
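/*
 * These EXP_* values are calc_load() decay factors in FIXED_1
 * (1 << FSHIFT == 2048) fixed-point, e.g.:
 *
 *	EXP_10s = 2048 / exp(2s / 10s) ~= 2048 * 0.8187 ~= 1677
 */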
165
166 /* PSI trigger definitions */
167 #define WINDOW_MAX_US 10000000 /* Max window size is 10s */
168 #define UPDATES_PER_WINDOW 10 /* 10 updates per window */
169
170 /* Sampling frequency in nanoseconds */
171 static u64 psi_period __read_mostly;
172
173 /* System-level pressure and stall tracking */
174 static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
175 struct psi_group psi_system = {
176 .pcpu = &system_group_pcpu,
177 };
178
179 static DEFINE_PER_CPU(seqcount_t, psi_seq);
180
181 static inline void psi_write_begin(int cpu)
182 {
183 write_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
184 }
185
186 static inline void psi_write_end(int cpu)
187 {
188 write_seqcount_end(per_cpu_ptr(&psi_seq, cpu));
189 }
190
191 static inline u32 psi_read_begin(int cpu)
192 {
193 return read_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
194 }
195
196 static inline bool psi_read_retry(int cpu, u32 seq)
197 {
198 return read_seqcount_retry(per_cpu_ptr(&psi_seq, cpu), seq);
199 }
200
201 static void psi_avgs_work(struct work_struct *work);
202
203 static void poll_timer_fn(struct timer_list *t);
204
205 static void group_init(struct psi_group *group)
206 {
207 int cpu;
208
209 group->enabled = true;
210 for_each_possible_cpu(cpu)
211 seqcount_init(per_cpu_ptr(&psi_seq, cpu));
212 group->avg_last_update = sched_clock();
213 group->avg_next_update = group->avg_last_update + psi_period;
214 mutex_init(&group->avgs_lock);
215
216 /* Init avg trigger-related members */
217 INIT_LIST_HEAD(&group->avg_triggers);
218 memset(group->avg_nr_triggers, 0, sizeof(group->avg_nr_triggers));
219 INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
220
221 /* Init rtpoll trigger-related members */
222 atomic_set(&group->rtpoll_scheduled, 0);
223 mutex_init(&group->rtpoll_trigger_lock);
224 INIT_LIST_HEAD(&group->rtpoll_triggers);
225 group->rtpoll_min_period = U32_MAX;
226 group->rtpoll_next_update = ULLONG_MAX;
227 init_waitqueue_head(&group->rtpoll_wait);
228 timer_setup(&group->rtpoll_timer, poll_timer_fn, 0);
229 rcu_assign_pointer(group->rtpoll_task, NULL);
230 }
231
232 void __init psi_init(void)
233 {
234 if (!psi_enable) {
235 static_branch_enable(&psi_disabled);
236 static_branch_disable(&psi_cgroups_enabled);
237 return;
238 }
239
240 if (!cgroup_psi_enabled())
241 static_branch_disable(&psi_cgroups_enabled);
242
243 psi_period = jiffies_to_nsecs(PSI_FREQ);
244 group_init(&psi_system);
245 }
246
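/*
 * Derive the composite PSI state bits for one CPU from its task counts.
 * @state_mask carries the PSI_ONCPU bit in; the returned mask has the
 * applicable SOME/FULL bits for IO, memory and CPU, plus PSI_NONIDLE.
 */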
247 static u32 test_states(unsigned int *tasks, u32 state_mask)
248 {
249 const bool oncpu = state_mask & PSI_ONCPU;
250
251 if (tasks[NR_IOWAIT]) {
252 state_mask |= BIT(PSI_IO_SOME);
253 if (!tasks[NR_RUNNING])
254 state_mask |= BIT(PSI_IO_FULL);
255 }
256
257 if (tasks[NR_MEMSTALL]) {
258 state_mask |= BIT(PSI_MEM_SOME);
259 if (tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING])
260 state_mask |= BIT(PSI_MEM_FULL);
261 }
262
263 if (tasks[NR_RUNNING] > oncpu)
264 state_mask |= BIT(PSI_CPU_SOME);
265
266 if (tasks[NR_RUNNING] && !oncpu)
267 state_mask |= BIT(PSI_CPU_FULL);
268
269 if (tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || tasks[NR_RUNNING])
270 state_mask |= BIT(PSI_NONIDLE);
271
272 return state_mask;
273 }
274
275 static void get_recent_times(struct psi_group *group, int cpu,
276 enum psi_aggregators aggregator, u32 *times,
277 u32 *pchanged_states)
278 {
279 struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
280 int current_cpu = raw_smp_processor_id();
281 unsigned int tasks[NR_PSI_TASK_COUNTS];
282 u64 now, state_start;
283 enum psi_states s;
284 unsigned int seq;
285 u32 state_mask;
286
287 *pchanged_states = 0;
288
289 /* Snapshot a coherent view of the CPU state */
290 do {
291 seq = psi_read_begin(cpu);
292 now = cpu_clock(cpu);
293 memcpy(times, groupc->times, sizeof(groupc->times));
294 state_mask = groupc->state_mask;
295 state_start = groupc->state_start;
296 if (cpu == current_cpu)
297 memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
298 } while (psi_read_retry(cpu, seq));
299
300 /* Calculate state time deltas against the previous snapshot */
301 for (s = 0; s < NR_PSI_STATES; s++) {
302 u32 delta;
303 /*
304 * In addition to already concluded states, we also
305 * incorporate currently active states on the CPU,
306 * since states may last for many sampling periods.
307 *
308 * This way we keep our delta sampling buckets small
309 * (u32) and our reported pressure close to what's
310 * actually happening.
311 */
312 if (state_mask & (1 << s))
313 times[s] += now - state_start;
314
315 delta = times[s] - groupc->times_prev[aggregator][s];
316 groupc->times_prev[aggregator][s] = times[s];
317
318 times[s] = delta;
319 if (delta)
320 *pchanged_states |= (1 << s);
321 }
322
323 /*
324 * When collect_percpu_times() is called from the avgs_work, we don't want
325 * to re-arm avgs_work when all CPUs are IDLE. But the current CPU running
326 * this avgs_work is never IDLE itself, so avgs_work would never shut off.
327 * So for the current CPU, we need to re-arm avgs_work only when
328 * (NR_RUNNING > 1 || NR_IOWAIT > 0 || NR_MEMSTALL > 0), for other CPUs
329 * we can just check PSI_NONIDLE delta.
330 */
331 if (current_work() == &group->avgs_work.work) {
332 bool reschedule;
333
334 if (cpu == current_cpu)
335 reschedule = tasks[NR_RUNNING] +
336 tasks[NR_IOWAIT] +
337 tasks[NR_MEMSTALL] > 1;
338 else
339 reschedule = *pchanged_states & (1 << PSI_NONIDLE);
340
341 if (reschedule)
342 *pchanged_states |= PSI_STATE_RESCHEDULE;
343 }
344 }
345
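/*
 * Fold one sampling period into the 10s/60s/300s running averages:
 * decay through any missed (no-activity) periods first, then integrate
 * the latest sample as a fixed-point percentage of the elapsed period.
 */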
346 static void calc_avgs(unsigned long avg[3], int missed_periods,
347 u64 time, u64 period)
348 {
349 unsigned long pct;
350
351 /* Fill in zeroes for periods of no activity */
352 if (missed_periods) {
353 avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
354 avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
355 avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
356 }
357
358 /* Sample the most recent active period */
359 pct = div_u64(time * 100, period);
360 pct *= FIXED_1;
361 avg[0] = calc_load(avg[0], EXP_10s, pct);
362 avg[1] = calc_load(avg[1], EXP_60s, pct);
363 avg[2] = calc_load(avg[2], EXP_300s, pct);
364 }
365
366 static void collect_percpu_times(struct psi_group *group,
367 enum psi_aggregators aggregator,
368 u32 *pchanged_states)
369 {
370 u64 deltas[NR_PSI_STATES - 1] = { 0, };
371 unsigned long nonidle_total = 0;
372 u32 changed_states = 0;
373 int cpu;
374 int s;
375
376 /*
377 * Collect the per-cpu time buckets and average them into a
378 * single time sample that is normalized to wall clock time.
379 *
380 * For averaging, each CPU is weighted by its non-idle time in
381 * the sampling period. This eliminates artifacts from uneven
382 * loading, or even entirely idle CPUs.
383 */
384 for_each_possible_cpu(cpu) {
385 u32 times[NR_PSI_STATES];
386 u32 nonidle;
387 u32 cpu_changed_states;
388
389 get_recent_times(group, cpu, aggregator, times,
390 &cpu_changed_states);
391 changed_states |= cpu_changed_states;
392
393 nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
394 nonidle_total += nonidle;
395
396 for (s = 0; s < PSI_NONIDLE; s++)
397 deltas[s] += (u64)times[s] * nonidle;
398 }
399
400 /*
401 * Integrate the sample into the running statistics that are
402 * reported to userspace: the cumulative stall times and the
403 * decaying averages.
404 *
405 * Pressure percentages are sampled at PSI_FREQ. We might be
406 * called more often when the user polls more frequently than
407 * that; we might be called less often when there is no task
408 * activity, thus no data, and clock ticks are sporadic. The
409 * below handles both.
410 */
411
412 /* total= */
413 for (s = 0; s < NR_PSI_STATES - 1; s++)
414 group->total[aggregator][s] +=
415 div_u64(deltas[s], max(nonidle_total, 1UL));
416
417 if (pchanged_states)
418 *pchanged_states = changed_states;
419 }
420
421 /* Trigger tracking window manipulations */
422 static void window_reset(struct psi_window *win, u64 now, u64 value,
423 u64 prev_growth)
424 {
425 win->start_time = now;
426 win->start_value = value;
427 win->prev_growth = prev_growth;
428 }
429
430 /*
431 * PSI growth tracking window update and growth calculation routine.
432 *
433 * This approximates a sliding tracking window by interpolating
434 * partially elapsed windows using historical growth data from the
435 * previous intervals. This minimizes memory requirements (by not storing
436 * all the intermediate values in the previous window) and simplifies
437 * the calculations. It works well because PSI signal changes only in
438 * positive direction and over relatively small window sizes the growth
439 * is close to linear.
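 *
 * For example, with a 1s window that is 400ms into its current period,
 * the reported growth is whatever accrued during those 400ms plus 60%
 * (the remaining 600ms' worth) of the previous window's growth.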
440 */
441 static u64 window_update(struct psi_window *win, u64 now, u64 value)
442 {
443 u64 elapsed;
444 u64 growth;
445
446 elapsed = now - win->start_time;
447 growth = value - win->start_value;
448 /*
449 * After each tracking window passes win->start_value and
450 * win->start_time get reset and win->prev_growth stores
451 * the average per-window growth of the previous window.
452 * win->prev_growth is then used to interpolate additional
453 * growth from the previous window assuming it was linear.
454 */
455 if (elapsed > win->size)
456 window_reset(win, now, value, growth);
457 else {
458 u32 remaining;
459
460 remaining = win->size - elapsed;
461 growth += div64_u64(win->prev_growth * remaining, win->size);
462 }
463
464 return growth;
465 }
466
467 static void update_triggers(struct psi_group *group, u64 now,
468 enum psi_aggregators aggregator)
469 {
470 struct psi_trigger *t;
471 u64 *total = group->total[aggregator];
472 struct list_head *triggers;
473 u64 *aggregator_total;
474
475 if (aggregator == PSI_AVGS) {
476 triggers = &group->avg_triggers;
477 aggregator_total = group->avg_total;
478 } else {
479 triggers = &group->rtpoll_triggers;
480 aggregator_total = group->rtpoll_total;
481 }
482
483 /*
484 * On subsequent updates, calculate growth deltas and let
485 * watchers know when their specified thresholds are exceeded.
486 */
487 list_for_each_entry(t, triggers, node) {
488 u64 growth;
489 bool new_stall;
490
491 new_stall = aggregator_total[t->state] != total[t->state];
492
493 /* Check for stall activity or a previous threshold breach */
494 if (!new_stall && !t->pending_event)
495 continue;
496 /*
497 * Check for new stall activity, as well as deferred
498 * events that occurred in the last window after the
499 * trigger had already fired (we want to ratelimit
500 * events without dropping any).
501 */
502 if (new_stall) {
503 /* Calculate growth since last update */
504 growth = window_update(&t->win, now, total[t->state]);
505 if (!t->pending_event) {
506 if (growth < t->threshold)
507 continue;
508
509 t->pending_event = true;
510 }
511 }
512 /* Limit event signaling to once per window */
513 if (now < t->last_event_time + t->win.size)
514 continue;
515
516 /* Generate an event */
517 if (cmpxchg(&t->event, 0, 1) == 0) {
518 if (t->of)
519 kernfs_notify(t->of->kn);
520 else
521 wake_up_interruptible(&t->event_wait);
522 }
523 t->last_event_time = now;
524 /* Reset threshold breach flag once event got generated */
525 t->pending_event = false;
526 }
527 }
528
529 static u64 update_averages(struct psi_group *group, u64 now)
530 {
531 unsigned long missed_periods = 0;
532 u64 expires, period;
533 u64 avg_next_update;
534 int s;
535
536 /* avgX= */
537 expires = group->avg_next_update;
538 if (now - expires >= psi_period)
539 missed_periods = div_u64(now - expires, psi_period);
540
541 /*
542 * The periodic clock tick can get delayed for various
543 * reasons, especially on loaded systems. To avoid clock
544 * drift, we schedule the clock in fixed psi_period intervals.
545 * But the deltas we sample out of the per-cpu buckets above
546 * are based on the actual time elapsing between clock ticks.
547 */
548 avg_next_update = expires + ((1 + missed_periods) * psi_period);
549 period = now - (group->avg_last_update + (missed_periods * psi_period));
550 group->avg_last_update = now;
551
552 for (s = 0; s < NR_PSI_STATES - 1; s++) {
553 u32 sample;
554
555 sample = group->total[PSI_AVGS][s] - group->avg_total[s];
556 /*
557 * Due to the lockless sampling of the time buckets,
558 * recorded time deltas can slip into the next period,
559 * which under full pressure can result in samples in
560 * excess of the period length.
561 *
562 * We don't want to report non-sensical pressures in
563 * excess of 100%, nor do we want to drop such events
564 * on the floor. Instead we punt any overage into the
565 * future until pressure subsides. By doing this we
566 * don't underreport the occurring pressure curve, we
567 * just report it delayed by one period length.
568 *
569 * The error isn't cumulative. As soon as another
570 * delta slips from a period P to P+1, by definition
571 * it frees up its time T in P.
572 */
573 if (sample > period)
574 sample = period;
575 group->avg_total[s] += sample;
576 calc_avgs(group->avg[s], missed_periods, sample, period);
577 }
578
579 return avg_next_update;
580 }
581
582 static void psi_avgs_work(struct work_struct *work)
583 {
584 struct delayed_work *dwork;
585 struct psi_group *group;
586 u32 changed_states;
587 u64 now;
588
589 dwork = to_delayed_work(work);
590 group = container_of(dwork, struct psi_group, avgs_work);
591
592 mutex_lock(&group->avgs_lock);
593
594 now = sched_clock();
595
596 collect_percpu_times(group, PSI_AVGS, &changed_states);
597 /*
598 * If there is task activity, periodically fold the per-cpu
599 * times and feed samples into the running averages. If things
600 * are idle and there is no data to process, stop the clock.
601 * Once restarted, we'll catch up the running averages in one
602 * go - see calc_avgs() and missed_periods.
603 */
604 if (now >= group->avg_next_update) {
605 update_triggers(group, now, PSI_AVGS);
606 group->avg_next_update = update_averages(group, now);
607 }
608
609 if (changed_states & PSI_STATE_RESCHEDULE) {
610 schedule_delayed_work(dwork, nsecs_to_jiffies(
611 group->avg_next_update - now) + 1);
612 }
613
614 mutex_unlock(&group->avgs_lock);
615 }
616
617 static void init_rtpoll_triggers(struct psi_group *group, u64 now)
618 {
619 struct psi_trigger *t;
620
621 list_for_each_entry(t, &group->rtpoll_triggers, node)
622 window_reset(&t->win, now,
623 group->total[PSI_POLL][t->state], 0);
624 memcpy(group->rtpoll_total, group->total[PSI_POLL],
625 sizeof(group->rtpoll_total));
626 group->rtpoll_next_update = now + group->rtpoll_min_period;
627 }
628
629 /* Schedule rtpolling if it's not already scheduled, or when forced. */
630 static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long delay,
631 bool force)
632 {
633 struct task_struct *task;
634
635 /*
636 * atomic_xchg should be called even when !force to provide a
637 * full memory barrier (see the comment inside psi_rtpoll_work).
638 */
639 if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force)
640 return;
641
642 rcu_read_lock();
643
644 task = rcu_dereference(group->rtpoll_task);
645 /*
646 * kworker might be NULL in case psi_trigger_destroy races with
647 * psi_task_change (hotpath) which can't use locks
648 */
649 if (likely(task))
650 mod_timer(&group->rtpoll_timer, jiffies + delay);
651 else
652 atomic_set(&group->rtpoll_scheduled, 0);
653
654 rcu_read_unlock();
655 }
656
657 static void psi_rtpoll_work(struct psi_group *group)
658 {
659 bool force_reschedule = false;
660 u32 changed_states;
661 u64 now;
662
663 mutex_lock(&group->rtpoll_trigger_lock);
664
665 now = sched_clock();
666
667 if (now > group->rtpoll_until) {
668 /*
669 * We are either about to start or might stop rtpolling if no
670 * state change was recorded. Resetting rtpoll_scheduled leaves
671 * a small window for psi_group_change to sneak in and schedule
672 * an immediate rtpoll_work before we get to rescheduling. One
673 * potential extra wakeup at the end of the rtpolling window
674 * should be negligible and rtpoll_next_update still keeps
675 * updates correctly on schedule.
676 */
677 atomic_set(&group->rtpoll_scheduled, 0);
678 /*
679 * A task change can race with the rtpoll worker that is supposed to
680 * report on it. To avoid missing events, ensure ordering between
681 * rtpoll_scheduled and the task state accesses, such that if the
682 * rtpoll worker misses the state update, the task change is
683 * guaranteed to reschedule the rtpoll worker:
684 *
685 * rtpoll worker:
686 * atomic_set(rtpoll_scheduled, 0)
687 * smp_mb()
688 * LOAD states
689 *
690 * task change:
691 * STORE states
692 * if atomic_xchg(rtpoll_scheduled, 1) == 0:
693 * schedule rtpoll worker
694 *
695 * The atomic_xchg() implies a full barrier.
696 */
697 smp_mb();
698 } else {
699 /* The rtpolling window is not over, keep rescheduling */
700 force_reschedule = true;
701 }
702
703
704 collect_percpu_times(group, PSI_POLL, &changed_states);
705
706 if (changed_states & group->rtpoll_states) {
707 /* Initialize trigger windows when entering rtpolling mode */
708 if (now > group->rtpoll_until)
709 init_rtpoll_triggers(group, now);
710
711 /*
712 * Keep the monitor active for at least the duration of the
713 * minimum tracking window as long as monitor states are
714 * changing.
715 */
716 group->rtpoll_until = now +
717 group->rtpoll_min_period * UPDATES_PER_WINDOW;
718 }
719
720 if (now > group->rtpoll_until) {
721 group->rtpoll_next_update = ULLONG_MAX;
722 goto out;
723 }
724
725 if (now >= group->rtpoll_next_update) {
726 if (changed_states & group->rtpoll_states) {
727 update_triggers(group, now, PSI_POLL);
728 memcpy(group->rtpoll_total, group->total[PSI_POLL],
729 sizeof(group->rtpoll_total));
730 }
731 group->rtpoll_next_update = now + group->rtpoll_min_period;
732 }
733
734 psi_schedule_rtpoll_work(group,
735 nsecs_to_jiffies(group->rtpoll_next_update - now) + 1,
736 force_reschedule);
737
738 out:
739 mutex_unlock(&group->rtpoll_trigger_lock);
740 }
741
742 static int psi_rtpoll_worker(void *data)
743 {
744 struct psi_group *group = (struct psi_group *)data;
745
746 sched_set_fifo_low(current);
747
748 while (true) {
749 wait_event_interruptible(group->rtpoll_wait,
750 atomic_cmpxchg(&group->rtpoll_wakeup, 1, 0) ||
751 kthread_should_stop());
752 if (kthread_should_stop())
753 break;
754
755 psi_rtpoll_work(group);
756 }
757 return 0;
758 }
759
760 static void poll_timer_fn(struct timer_list *t)
761 {
762 struct psi_group *group = timer_container_of(group, t, rtpoll_timer);
763
764 atomic_set(&group->rtpoll_wakeup, 1);
765 wake_up_interruptible(&group->rtpoll_wait);
766 }
767
768 static void record_times(struct psi_group_cpu *groupc, u64 now)
769 {
770 u32 delta;
771
772 delta = now - groupc->state_start;
773 groupc->state_start = now;
774
775 if (groupc->state_mask & (1 << PSI_IO_SOME)) {
776 groupc->times[PSI_IO_SOME] += delta;
777 if (groupc->state_mask & (1 << PSI_IO_FULL))
778 groupc->times[PSI_IO_FULL] += delta;
779 }
780
781 if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
782 groupc->times[PSI_MEM_SOME] += delta;
783 if (groupc->state_mask & (1 << PSI_MEM_FULL))
784 groupc->times[PSI_MEM_FULL] += delta;
785 }
786
787 if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
788 groupc->times[PSI_CPU_SOME] += delta;
789 if (groupc->state_mask & (1 << PSI_CPU_FULL))
790 groupc->times[PSI_CPU_FULL] += delta;
791 }
792
793 if (groupc->state_mask & (1 << PSI_NONIDLE))
794 groupc->times[PSI_NONIDLE] += delta;
795 }
796
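/* Iterate @group and each of its ancestors, walking up toward the root. */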
797 #define for_each_group(iter, group) \
798 for (typeof(group) iter = group; iter; iter = iter->parent)
799
800 static void psi_group_change(struct psi_group *group, int cpu,
801 unsigned int clear, unsigned int set,
802 u64 now, bool wake_clock)
803 {
804 struct psi_group_cpu *groupc;
805 unsigned int t, m;
806 u32 state_mask;
807
808 lockdep_assert_rq_held(cpu_rq(cpu));
809 groupc = per_cpu_ptr(group->pcpu, cpu);
810
811 /*
812 * Start with TSK_ONCPU, which doesn't have a corresponding
813 * task count - it's just a boolean flag directly encoded in
814 * the state mask. Clear, set, or carry the current state if
815 * no changes are requested.
816 */
817 if (unlikely(clear & TSK_ONCPU)) {
818 state_mask = 0;
819 clear &= ~TSK_ONCPU;
820 } else if (unlikely(set & TSK_ONCPU)) {
821 state_mask = PSI_ONCPU;
822 set &= ~TSK_ONCPU;
823 } else {
824 state_mask = groupc->state_mask & PSI_ONCPU;
825 }
826
827 /*
828 * The rest of the state mask is calculated based on the task
829 * counts. Update those first, then construct the mask.
830 */
831 for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
832 if (!(m & (1 << t)))
833 continue;
834 if (groupc->tasks[t]) {
835 groupc->tasks[t]--;
836 } else if (!psi_bug) {
837 printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
838 cpu, t, groupc->tasks[0],
839 groupc->tasks[1], groupc->tasks[2],
840 groupc->tasks[3], clear, set);
841 psi_bug = 1;
842 }
843 }
844
845 for (t = 0; set; set &= ~(1 << t), t++)
846 if (set & (1 << t))
847 groupc->tasks[t]++;
848
849 if (!group->enabled) {
850 /*
851 * On the first group change after disabling PSI, conclude
852 * the current state and flush its time. This is unlikely
853 * to matter to the user, but aggregation (get_recent_times)
854 * may have already incorporated the live state into times_prev;
855 * avoid a delta sample underflow when PSI is later re-enabled.
856 */
857 if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
858 record_times(groupc, now);
859
860 groupc->state_mask = state_mask;
861
862 return;
863 }
864
865 state_mask = test_states(groupc->tasks, state_mask);
866
867 /*
868 * Since we care about lost potential, a memstall is FULL
869 * when there are no other working tasks, but also when
870 * the CPU is actively reclaiming and nothing productive
871 * could run even if it were runnable. So when the current
872 * task in a cgroup is in_memstall, the corresponding groupc
873 * on that cpu is in PSI_MEM_FULL state.
874 */
875 if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
876 state_mask |= (1 << PSI_MEM_FULL);
877
878 record_times(groupc, now);
879
880 groupc->state_mask = state_mask;
881
882 if (state_mask & group->rtpoll_states)
883 psi_schedule_rtpoll_work(group, 1, false);
884
885 if (wake_clock && !delayed_work_pending(&group->avgs_work))
886 schedule_delayed_work(&group->avgs_work, PSI_FREQ);
887 }
888
889 static inline struct psi_group *task_psi_group(struct task_struct *task)
890 {
891 #ifdef CONFIG_CGROUPS
892 if (static_branch_likely(&psi_cgroups_enabled))
893 return cgroup_psi(task_dfl_cgroup(task));
894 #endif
895 return &psi_system;
896 }
897
898 static void psi_flags_change(struct task_struct *task, int clear, int set)
899 {
900 if (((task->psi_flags & set) ||
901 (task->psi_flags & clear) != clear) &&
902 !psi_bug) {
903 printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
904 task->pid, task->comm, task_cpu(task),
905 task->psi_flags, clear, set);
906 psi_bug = 1;
907 }
908
909 task->psi_flags &= ~clear;
910 task->psi_flags |= set;
911 }
912
913 void psi_task_change(struct task_struct *task, int clear, int set)
914 {
915 int cpu = task_cpu(task);
916 u64 now;
917
918 if (!task->pid)
919 return;
920
921 psi_flags_change(task, clear, set);
922
923 psi_write_begin(cpu);
924 now = cpu_clock(cpu);
925 for_each_group(group, task_psi_group(task))
926 psi_group_change(group, cpu, clear, set, now, true);
927 psi_write_end(cpu);
928 }
929
930 void psi_task_switch(struct task_struct *prev, struct task_struct *next,
931 bool sleep)
932 {
933 struct psi_group *common = NULL;
934 int cpu = task_cpu(prev);
935 u64 now;
936
937 psi_write_begin(cpu);
938 now = cpu_clock(cpu);
939
940 if (next->pid) {
941 psi_flags_change(next, 0, TSK_ONCPU);
942 /*
943 * Set TSK_ONCPU on @next's cgroups. If @next shares any
944 * ancestors with @prev, those will already have @prev's
945 * TSK_ONCPU bit set, and we can stop the iteration there.
946 */
947 for_each_group(group, task_psi_group(next)) {
948 struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
949
950 if (groupc->state_mask & PSI_ONCPU) {
951 common = group;
952 break;
953 }
954 psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
955 }
956 }
957
958 if (prev->pid) {
959 int clear = TSK_ONCPU, set = 0;
960 bool wake_clock = true;
961
962 /*
963 * When we're going to sleep, psi_dequeue() lets us
964 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
965 * TSK_IOWAIT here, where we can combine it with
966 * TSK_ONCPU and save walking common ancestors twice.
967 */
968 if (sleep) {
969 clear |= TSK_RUNNING;
970 if (prev->in_memstall)
971 clear |= TSK_MEMSTALL_RUNNING;
972 if (prev->in_iowait)
973 set |= TSK_IOWAIT;
974
975 /*
976 * Periodic aggregation shuts off if there is a period of no
977 * task changes, so we wake it back up if necessary. However,
978 * don't do this if the task change is the aggregation worker
979 * itself going to sleep, or we'll ping-pong forever.
980 */
981 if (unlikely((prev->flags & PF_WQ_WORKER) &&
982 wq_worker_last_func(prev) == psi_avgs_work))
983 wake_clock = false;
984 }
985
986 psi_flags_change(prev, clear, set);
987
988 for_each_group(group, task_psi_group(prev)) {
989 if (group == common)
990 break;
991 psi_group_change(group, cpu, clear, set, now, wake_clock);
992 }
993
994 /*
995 * TSK_ONCPU is handled up to the common ancestor. If there are
996 * any other differences between the two tasks (e.g. prev goes
997 * to sleep, or only one task is memstall), finish propagating
998 * those differences all the way up to the root.
999 */
1000 if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
1001 clear &= ~TSK_ONCPU;
1002 for_each_group(group, common)
1003 psi_group_change(group, cpu, clear, set, now, wake_clock);
1004 }
1005 }
1006 psi_write_end(cpu);
1007 }
1008
1009 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1010 void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
1011 {
1012 int cpu = task_cpu(curr);
1013 struct psi_group_cpu *groupc;
1014 s64 delta;
1015 u64 irq;
1016 u64 now;
1017
1018 if (static_branch_likely(&psi_disabled) || !irqtime_enabled())
1019 return;
1020
1021 if (!curr->pid)
1022 return;
1023
1024 lockdep_assert_rq_held(rq);
1025 if (prev && task_psi_group(prev) == task_psi_group(curr))
1026 return;
1027
1028 irq = irq_time_read(cpu);
1029 delta = (s64)(irq - rq->psi_irq_time);
1030 if (delta < 0)
1031 return;
1032 rq->psi_irq_time = irq;
1033
1034 psi_write_begin(cpu);
1035 now = cpu_clock(cpu);
1036
1037 for_each_group(group, task_psi_group(curr)) {
1038 if (!group->enabled)
1039 continue;
1040
1041 groupc = per_cpu_ptr(group->pcpu, cpu);
1042
1043 record_times(groupc, now);
1044 groupc->times[PSI_IRQ_FULL] += delta;
1045
1046 if (group->rtpoll_states & (1 << PSI_IRQ_FULL))
1047 psi_schedule_rtpoll_work(group, 1, false);
1048 }
1049 psi_write_end(cpu);
1050 }
1051 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1052
1053 /**
1054 * psi_memstall_enter - mark the beginning of a memory stall section
1055 * @flags: flags to handle nested sections
1056 *
1057 * Marks the calling task as being stalled due to a lack of memory,
1058 * such as waiting for a refault or performing reclaim.
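 *
 * A minimal (hypothetical) call site looks like:
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	... block on the stalled resource, e.g. wait for a refault ...
 *	psi_memstall_leave(&pflags);
 *
 * The saved flags make nested sections no-ops.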
1059 */
1060 void psi_memstall_enter(unsigned long *flags)
1061 {
1062 struct rq_flags rf;
1063 struct rq *rq;
1064
1065 if (static_branch_likely(&psi_disabled))
1066 return;
1067
1068 *flags = current->in_memstall;
1069 if (*flags)
1070 return;
1071 /*
1072 * in_memstall setting & accounting needs to be atomic wrt
1073 * changes to the task's scheduling state, otherwise we can
1074 * race with CPU migration.
1075 */
1076 rq = this_rq_lock_irq(&rf);
1077
1078 current->in_memstall = 1;
1079 psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
1080
1081 rq_unlock_irq(rq, &rf);
1082 }
1083 EXPORT_SYMBOL_GPL(psi_memstall_enter);
1084
1085 /**
1086 * psi_memstall_leave - mark the end of a memory stall section
1087 * @flags: flags to handle nested memdelay sections
1088 *
1089 * Marks the calling task as no longer stalled due to lack of memory.
1090 */
1091 void psi_memstall_leave(unsigned long *flags)
1092 {
1093 struct rq_flags rf;
1094 struct rq *rq;
1095
1096 if (static_branch_likely(&psi_disabled))
1097 return;
1098
1099 if (*flags)
1100 return;
1101 /*
1102 * in_memstall clearing & accounting needs to be atomic wrt
1103 * changes to the task's scheduling state, otherwise we could
1104 * race with CPU migration.
1105 */
1106 rq = this_rq_lock_irq(&rf);
1107
1108 current->in_memstall = 0;
1109 psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);
1110
1111 rq_unlock_irq(rq, &rf);
1112 }
1113 EXPORT_SYMBOL_GPL(psi_memstall_leave);
1114
1115 #ifdef CONFIG_CGROUPS
1116 int psi_cgroup_alloc(struct cgroup *cgroup)
1117 {
1118 if (!static_branch_likely(&psi_cgroups_enabled))
1119 return 0;
1120
1121 cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
1122 if (!cgroup->psi)
1123 return -ENOMEM;
1124
1125 cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
1126 if (!cgroup->psi->pcpu) {
1127 kfree(cgroup->psi);
1128 return -ENOMEM;
1129 }
1130 group_init(cgroup->psi);
1131 cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
1132 return 0;
1133 }
1134
1135 void psi_cgroup_free(struct cgroup *cgroup)
1136 {
1137 if (!static_branch_likely(&psi_cgroups_enabled))
1138 return;
1139
1140 cancel_delayed_work_sync(&cgroup->psi->avgs_work);
1141 free_percpu(cgroup->psi->pcpu);
1142 /* All triggers must be removed by now */
1143 WARN_ONCE(cgroup->psi->rtpoll_states, "psi: trigger leak\n");
1144 kfree(cgroup->psi);
1145 }
1146
1147 /**
1148 * cgroup_move_task - move task to a different cgroup
1149 * @task: the task
1150 * @to: the target css_set
1151 *
1152 * Move task to a new cgroup and safely migrate its associated stall
1153 * state between the different groups.
1154 *
1155 * This function acquires the task's rq lock to lock out concurrent
1156 * changes to the task's scheduling state and - in case the task is
1157 * running - concurrent changes to its stall state.
1158 */
1159 void cgroup_move_task(struct task_struct *task, struct css_set *to)
1160 {
1161 unsigned int task_flags;
1162 struct rq_flags rf;
1163 struct rq *rq;
1164
1165 if (!static_branch_likely(&psi_cgroups_enabled)) {
1166 /*
1167 * Lame to do this here, but the scheduler cannot be locked
1168 * from the outside, so we move cgroups from inside sched/.
1169 */
1170 rcu_assign_pointer(task->cgroups, to);
1171 return;
1172 }
1173
1174 rq = task_rq_lock(task, &rf);
1175
1176 /*
1177 * We may race with schedule() dropping the rq lock between
1178 * deactivating prev and switching to next. Because the psi
1179 * updates from the deactivation are deferred to the switch
1180 * callback to save cgroup tree updates, the task's scheduling
1181 * state here is not coherent with its psi state:
1182 *
1183 * schedule() cgroup_move_task()
1184 * rq_lock()
1185 * deactivate_task()
1186 * p->on_rq = 0
1187 * psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
1188 * pick_next_task()
1189 * rq_unlock()
1190 * rq_lock()
1191 * psi_task_change() // old cgroup
1192 * task->cgroups = to
1193 * psi_task_change() // new cgroup
1194 * rq_unlock()
1195 * rq_lock()
1196 * psi_sched_switch() // does deferred updates in new cgroup
1197 *
1198 * Don't rely on the scheduling state. Use psi_flags instead.
1199 */
1200 task_flags = task->psi_flags;
1201
1202 if (task_flags)
1203 psi_task_change(task, task_flags, 0);
1204
1205 /* See comment above */
1206 rcu_assign_pointer(task->cgroups, to);
1207
1208 if (task_flags)
1209 psi_task_change(task, 0, task_flags);
1210
1211 task_rq_unlock(rq, task, &rf);
1212 }
1213
1214 void psi_cgroup_restart(struct psi_group *group)
1215 {
1216 int cpu;
1217
1218 /*
1219 * After psi_group->enabled is cleared, we don't actually
1220 * stop per-cpu task accounting in each psi_group_cpu;
1221 * we only stop the test_states() loop, record_times()
1222 * and the averaging worker, see psi_group_change() for details.
1223 *
1224 * When cgroup PSI is disabled, this function has nothing to sync
1225 * since cgroup pressure files are hidden and percpu psi_group_cpu
1226 * would see !psi_group->enabled and only do task accounting.
1227 *
1228 * When cgroup PSI is re-enabled, this function uses psi_group_change()
1229 * to get the correct state mask from the test_states() loop on tasks[],
1230 * and restarts groupc->state_start from now, using .clear = .set = 0
1231 * here since no task state really changed.
1232 */
1233 if (!group->enabled)
1234 return;
1235
1236 for_each_possible_cpu(cpu) {
1237 u64 now;
1238
1239 guard(rq_lock_irq)(cpu_rq(cpu));
1240
1241 psi_write_begin(cpu);
1242 now = cpu_clock(cpu);
1243 psi_group_change(group, cpu, 0, 0, now, true);
1244 psi_write_end(cpu);
1245 }
1246 }
1247 #endif /* CONFIG_CGROUPS */
1248
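/*
 * Emit the aggregated pressure numbers for one resource in the ABI
 * format shared by /proc/pressure/* and the cgroup pressure files, e.g.:
 *
 *	some avg10=0.12 avg60=0.34 avg300=0.05 total=123456
 *	full avg10=0.00 avg60=0.10 avg300=0.02 total=45678
 *
 * (the "some" line is omitted for IRQ, which only reports "full").
 */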
1249 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
1250 {
1251 bool only_full = false;
1252 int full;
1253 u64 now;
1254
1255 if (static_branch_likely(&psi_disabled))
1256 return -EOPNOTSUPP;
1257
1258 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1259 if (!irqtime_enabled() && res == PSI_IRQ)
1260 return -EOPNOTSUPP;
1261 #endif
1262
1263 /* Update averages before reporting them */
1264 mutex_lock(&group->avgs_lock);
1265 now = sched_clock();
1266 collect_percpu_times(group, PSI_AVGS, NULL);
1267 if (now >= group->avg_next_update)
1268 group->avg_next_update = update_averages(group, now);
1269 mutex_unlock(&group->avgs_lock);
1270
1271 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1272 only_full = res == PSI_IRQ;
1273 #endif
1274
1275 for (full = 0; full < 2 - only_full; full++) {
1276 unsigned long avg[3] = { 0, };
1277 u64 total = 0;
1278 int w;
1279
1280 /* CPU FULL is undefined at the system level */
1281 if (!(group == &psi_system && res == PSI_CPU && full)) {
1282 for (w = 0; w < 3; w++)
1283 avg[w] = group->avg[res * 2 + full][w];
1284 total = div_u64(group->total[PSI_AVGS][res * 2 + full],
1285 NSEC_PER_USEC);
1286 }
1287
1288 seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
1289 full || only_full ? "full" : "some",
1290 LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
1291 LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
1292 LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
1293 total);
1294 }
1295
1296 return 0;
1297 }
1298
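/*
 * Illustrative userspace sketch (not part of this file) of the trigger
 * ABI parsed below; the 150ms-in-1s threshold and the choice of
 * /proc/pressure/memory are arbitrary examples:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char trig[] = "some 150000 1000000";
 *		struct pollfd fds;
 *		int fd;
 *
 *		fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *		if (fd < 0 || write(fd, trig, strlen(trig) + 1) < 0)
 *			return 1;
 *		fds.fd = fd;
 *		fds.events = POLLPRI;
 *		while (poll(&fds, 1, -1) > 0) {
 *			if (fds.revents & POLLERR)
 *				break;
 *			if (fds.revents & POLLPRI)
 *				printf("memory SOME stall exceeded 150ms within 1s\n");
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */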
1299 struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
1300 enum psi_res res, struct file *file,
1301 struct kernfs_open_file *of)
1302 {
1303 struct psi_trigger *t;
1304 enum psi_states state;
1305 u32 threshold_us;
1306 bool privileged;
1307 u32 window_us;
1308
1309 if (static_branch_likely(&psi_disabled))
1310 return ERR_PTR(-EOPNOTSUPP);
1311
1312 /*
1313 * Checking the privilege here on file->f_cred implies that a privileged user
1314 * could open the file and delegate the write to an unprivileged one.
1315 */
1316 privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE);
1317
1318 if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
1319 state = PSI_IO_SOME + res * 2;
1320 else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
1321 state = PSI_IO_FULL + res * 2;
1322 else
1323 return ERR_PTR(-EINVAL);
1324
1325 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1326 if (res == PSI_IRQ && --state != PSI_IRQ_FULL)
1327 return ERR_PTR(-EINVAL);
1328 #endif
1329
1330 if (state >= PSI_NONIDLE)
1331 return ERR_PTR(-EINVAL);
1332
1333 if (window_us == 0 || window_us > WINDOW_MAX_US)
1334 return ERR_PTR(-EINVAL);
1335
1336 /*
1337 * Unprivileged users can only use 2s windows so that averages aggregation
1338 * work is used, and no RT threads need to be spawned.
1339 */
1340 if (!privileged && window_us % 2000000)
1341 return ERR_PTR(-EINVAL);
1342
1343 /* Check threshold */
1344 if (threshold_us == 0 || threshold_us > window_us)
1345 return ERR_PTR(-EINVAL);
1346
1347 t = kmalloc(sizeof(*t), GFP_KERNEL);
1348 if (!t)
1349 return ERR_PTR(-ENOMEM);
1350
1351 t->group = group;
1352 t->state = state;
1353 t->threshold = threshold_us * NSEC_PER_USEC;
1354 t->win.size = window_us * NSEC_PER_USEC;
1355 window_reset(&t->win, sched_clock(),
1356 group->total[PSI_POLL][t->state], 0);
1357
1358 t->event = 0;
1359 t->last_event_time = 0;
1360 t->of = of;
1361 if (!of)
1362 init_waitqueue_head(&t->event_wait);
1363 t->pending_event = false;
1364 t->aggregator = privileged ? PSI_POLL : PSI_AVGS;
1365
1366 if (privileged) {
1367 mutex_lock(&group->rtpoll_trigger_lock);
1368
1369 if (!rcu_access_pointer(group->rtpoll_task)) {
1370 struct task_struct *task;
1371
1372 task = kthread_create(psi_rtpoll_worker, group, "psimon");
1373 if (IS_ERR(task)) {
1374 kfree(t);
1375 mutex_unlock(&group->rtpoll_trigger_lock);
1376 return ERR_CAST(task);
1377 }
1378 atomic_set(&group->rtpoll_wakeup, 0);
1379 wake_up_process(task);
1380 rcu_assign_pointer(group->rtpoll_task, task);
1381 }
1382
1383 list_add(&t->node, &group->rtpoll_triggers);
1384 group->rtpoll_min_period = min(group->rtpoll_min_period,
1385 div_u64(t->win.size, UPDATES_PER_WINDOW));
1386 group->rtpoll_nr_triggers[t->state]++;
1387 group->rtpoll_states |= (1 << t->state);
1388
1389 mutex_unlock(&group->rtpoll_trigger_lock);
1390 } else {
1391 mutex_lock(&group->avgs_lock);
1392
1393 list_add(&t->node, &group->avg_triggers);
1394 group->avg_nr_triggers[t->state]++;
1395
1396 mutex_unlock(&group->avgs_lock);
1397 }
1398 return t;
1399 }
1400
1401 void psi_trigger_destroy(struct psi_trigger *t)
1402 {
1403 struct psi_group *group;
1404 struct task_struct *task_to_destroy = NULL;
1405
1406 /*
1407 * We do not check psi_disabled since it might have been disabled after
1408 * the trigger got created.
1409 */
1410 if (!t)
1411 return;
1412
1413 group = t->group;
1414 /*
1415 * Wakeup waiters to stop polling and clear the queue to prevent it from
1416 * being accessed later. Can happen if cgroup is deleted from under a
1417 * polling process.
1418 */
1419 if (t->of)
1420 kernfs_notify(t->of->kn);
1421 else
1422 wake_up_interruptible(&t->event_wait);
1423
1424 if (t->aggregator == PSI_AVGS) {
1425 mutex_lock(&group->avgs_lock);
1426 if (!list_empty(&t->node)) {
1427 list_del(&t->node);
1428 group->avg_nr_triggers[t->state]--;
1429 }
1430 mutex_unlock(&group->avgs_lock);
1431 } else {
1432 mutex_lock(&group->rtpoll_trigger_lock);
1433 if (!list_empty(&t->node)) {
1434 struct psi_trigger *tmp;
1435 u64 period = ULLONG_MAX;
1436
1437 list_del(&t->node);
1438 group->rtpoll_nr_triggers[t->state]--;
1439 if (!group->rtpoll_nr_triggers[t->state])
1440 group->rtpoll_states &= ~(1 << t->state);
1441 /*
1442 * Reset the min update period for the remaining triggers
1443 * iff the trigger being destroyed had the min window size.
1444 */
1445 if (group->rtpoll_min_period == div_u64(t->win.size, UPDATES_PER_WINDOW)) {
1446 list_for_each_entry(tmp, &group->rtpoll_triggers, node)
1447 period = min(period, div_u64(tmp->win.size,
1448 UPDATES_PER_WINDOW));
1449 group->rtpoll_min_period = period;
1450 }
1451 /* Destroy rtpoll_task when the last trigger is destroyed */
1452 if (group->rtpoll_states == 0) {
1453 group->rtpoll_until = 0;
1454 task_to_destroy = rcu_dereference_protected(
1455 group->rtpoll_task,
1456 lockdep_is_held(&group->rtpoll_trigger_lock));
1457 rcu_assign_pointer(group->rtpoll_task, NULL);
1458 timer_delete(&group->rtpoll_timer);
1459 }
1460 }
1461 mutex_unlock(&group->rtpoll_trigger_lock);
1462 }
1463
1464 /*
1465 * Wait for psi_schedule_rtpoll_work RCU to complete its read-side
1466 * critical section before destroying the trigger and optionally the
1467 * rtpoll_task.
1468 */
1469 synchronize_rcu();
1470 /*
1471 * Stop kthread 'psimon' after releasing rtpoll_trigger_lock to prevent
1472 * a deadlock while waiting for psi_rtpoll_work to acquire
1473 * rtpoll_trigger_lock
1474 */
1475 if (task_to_destroy) {
1476 /*
1477 * After the RCU grace period has expired, the worker
1478 * can no longer be found through group->rtpoll_task.
1479 */
1480 kthread_stop(task_to_destroy);
1481 atomic_set(&group->rtpoll_scheduled, 0);
1482 }
1483 kfree(t);
1484 }
1485
1486 __poll_t psi_trigger_poll(void **trigger_ptr,
1487 struct file *file, poll_table *wait)
1488 {
1489 __poll_t ret = DEFAULT_POLLMASK;
1490 struct psi_trigger *t;
1491
1492 if (static_branch_likely(&psi_disabled))
1493 return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1494
1495 t = smp_load_acquire(trigger_ptr);
1496 if (!t)
1497 return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1498
1499 if (t->of)
1500 kernfs_generic_poll(t->of, wait);
1501 else
1502 poll_wait(file, &t->event_wait, wait);
1503
1504 if (cmpxchg(&t->event, 1, 0) == 1)
1505 ret |= EPOLLPRI;
1506
1507 return ret;
1508 }
1509
1510 #ifdef CONFIG_PROC_FS
1511 static int psi_io_show(struct seq_file *m, void *v)
1512 {
1513 return psi_show(m, &psi_system, PSI_IO);
1514 }
1515
1516 static int psi_memory_show(struct seq_file *m, void *v)
1517 {
1518 return psi_show(m, &psi_system, PSI_MEM);
1519 }
1520
1521 static int psi_cpu_show(struct seq_file *m, void *v)
1522 {
1523 return psi_show(m, &psi_system, PSI_CPU);
1524 }
1525
1526 static int psi_io_open(struct inode *inode, struct file *file)
1527 {
1528 return single_open(file, psi_io_show, NULL);
1529 }
1530
1531 static int psi_memory_open(struct inode *inode, struct file *file)
1532 {
1533 return single_open(file, psi_memory_show, NULL);
1534 }
1535
1536 static int psi_cpu_open(struct inode *inode, struct file *file)
1537 {
1538 return single_open(file, psi_cpu_show, NULL);
1539 }
1540
1541 static ssize_t psi_write(struct file *file, const char __user *user_buf,
1542 size_t nbytes, enum psi_res res)
1543 {
1544 char buf[32];
1545 size_t buf_size;
1546 struct seq_file *seq;
1547 struct psi_trigger *new;
1548
1549 if (static_branch_likely(&psi_disabled))
1550 return -EOPNOTSUPP;
1551
1552 if (!nbytes)
1553 return -EINVAL;
1554
1555 buf_size = min(nbytes, sizeof(buf));
1556 if (copy_from_user(buf, user_buf, buf_size))
1557 return -EFAULT;
1558
1559 buf[buf_size - 1] = '\0';
1560
1561 seq = file->private_data;
1562
1563 /* Take seq->lock to protect seq->private from concurrent writes */
1564 mutex_lock(&seq->lock);
1565
1566 /* Allow only one trigger per file descriptor */
1567 if (seq->private) {
1568 mutex_unlock(&seq->lock);
1569 return -EBUSY;
1570 }
1571
1572 new = psi_trigger_create(&psi_system, buf, res, file, NULL);
1573 if (IS_ERR(new)) {
1574 mutex_unlock(&seq->lock);
1575 return PTR_ERR(new);
1576 }
1577
1578 smp_store_release(&seq->private, new);
1579 mutex_unlock(&seq->lock);
1580
1581 return nbytes;
1582 }
1583
1584 static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
1585 size_t nbytes, loff_t *ppos)
1586 {
1587 return psi_write(file, user_buf, nbytes, PSI_IO);
1588 }
1589
1590 static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
1591 size_t nbytes, loff_t *ppos)
1592 {
1593 return psi_write(file, user_buf, nbytes, PSI_MEM);
1594 }
1595
1596 static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
1597 size_t nbytes, loff_t *ppos)
1598 {
1599 return psi_write(file, user_buf, nbytes, PSI_CPU);
1600 }
1601
1602 static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
1603 {
1604 struct seq_file *seq = file->private_data;
1605
1606 return psi_trigger_poll(&seq->private, file, wait);
1607 }
1608
1609 static int psi_fop_release(struct inode *inode, struct file *file)
1610 {
1611 struct seq_file *seq = file->private_data;
1612
1613 psi_trigger_destroy(seq->private);
1614 return single_release(inode, file);
1615 }
1616
1617 static const struct proc_ops psi_io_proc_ops = {
1618 .proc_open = psi_io_open,
1619 .proc_read = seq_read,
1620 .proc_lseek = seq_lseek,
1621 .proc_write = psi_io_write,
1622 .proc_poll = psi_fop_poll,
1623 .proc_release = psi_fop_release,
1624 };
1625
1626 static const struct proc_ops psi_memory_proc_ops = {
1627 .proc_open = psi_memory_open,
1628 .proc_read = seq_read,
1629 .proc_lseek = seq_lseek,
1630 .proc_write = psi_memory_write,
1631 .proc_poll = psi_fop_poll,
1632 .proc_release = psi_fop_release,
1633 };
1634
1635 static const struct proc_ops psi_cpu_proc_ops = {
1636 .proc_open = psi_cpu_open,
1637 .proc_read = seq_read,
1638 .proc_lseek = seq_lseek,
1639 .proc_write = psi_cpu_write,
1640 .proc_poll = psi_fop_poll,
1641 .proc_release = psi_fop_release,
1642 };
1643
1644 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1645 static int psi_irq_show(struct seq_file *m, void *v)
1646 {
1647 return psi_show(m, &psi_system, PSI_IRQ);
1648 }
1649
1650 static int psi_irq_open(struct inode *inode, struct file *file)
1651 {
1652 return single_open(file, psi_irq_show, NULL);
1653 }
1654
1655 static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
1656 size_t nbytes, loff_t *ppos)
1657 {
1658 return psi_write(file, user_buf, nbytes, PSI_IRQ);
1659 }
1660
1661 static const struct proc_ops psi_irq_proc_ops = {
1662 .proc_open = psi_irq_open,
1663 .proc_read = seq_read,
1664 .proc_lseek = seq_lseek,
1665 .proc_write = psi_irq_write,
1666 .proc_poll = psi_fop_poll,
1667 .proc_release = psi_fop_release,
1668 };
1669 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1670
1671 static int __init psi_proc_init(void)
1672 {
1673 if (psi_enable) {
1674 proc_mkdir("pressure", NULL);
1675 proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
1676 proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
1677 proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
1678 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1679 proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops);
1680 #endif
1681 }
1682 return 0;
1683 }
1684 module_init(psi_proc_init);
1685
1686 #endif /* CONFIG_PROC_FS */
1687