// SPDX-License-Identifier: GPL-2.0
/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * Polling support by Suren Baghdasaryan <surenb@google.com>
 * Copyright (c) 2018 Google, Inc.
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 *			Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
 *
 * What it means for a task to be productive is defined differently
 * for each resource. For IO, productive means a running task. For
 * memory, productive means a running task that isn't a reclaimer. For
 * CPU, productive means an on-CPU task.
 *
 * Naturally, the FULL state doesn't exist for the CPU resource at the
 * system level, but it does exist at the cgroup level. At the cgroup
 * level, FULL means all non-idle tasks in the cgroup are delayed on
 * the CPU resource, which is being used by others outside of the
 * cgroup or throttled by the cgroup's cpu.max configuration.
 *
 * The percentage of wall clock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
 *
 *			Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME then becomes the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_productive_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             = 0.4%
 *	   FULL = (256 - min(256, 256)) / 256 = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)               = 25%
 *	   FULL = (4 - min(3, 4)) / 4         = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
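 *
 * [ A minimal C rendering of the model above, for illustration only
 *   (all identifiers here are hypothetical, not kernel symbols):
 *
 *	unsigned int threads = min(nr_nonidle_tasks, nr_cpus);
 *	double some = min(nr_delayed_tasks / (double)threads, 1.0);
 *	double full = (threads - min(nr_productive_tasks, threads))
 *			/ (double)threads;
 *
 *   Plugging in the two scenarios above reproduces the 0.4%/0% and
 *   25%/25% results. ]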
 *
 *			Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
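 *
 * As a worked example with illustrative numbers: over a 2s period,
 * CPU 0 is non-idle for the full 2s, 1s of which is SOME, while
 * CPU 1 is non-idle for 1s, all of it SOME. The weighted aggregate
 * is then
 *
 *	tSOME = (1s * 2s + 1s * 1s) / (2s + 1s) = 1s
 *	%SOME = 1s / 2s = 50%
 *
 * rather than the 75% an unweighted average of the two CPUs'
 * SOME-while-non-idle ratios would suggest.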
 */
#include <linux/sched/clock.h>
#include <linux/workqueue.h>
#include <linux/psi.h>
#include "sched.h"

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);
static DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
	return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */
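
/*
 * The EXP_* constants above are 1/e^(sampling period/averaging window)
 * in the loadavg fixed-point format, i.e. scaled by FIXED_1 == 2048
 * (see calc_load()). For example:
 *
 *	EXP_10s = 2048 / e^(2s/10s) ~= 2048 * 0.8187 ~= 1677
 *	EXP_60s = 2048 / e^(2s/60s) ~= 2048 * 0.9672 ~= 1981
 */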

/* PSI trigger definitions */
#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
#define UPDATES_PER_WINDOW 10	/* 10 updates per window */

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};

static DEFINE_PER_CPU(seqcount_t, psi_seq) = SEQCNT_ZERO(psi_seq);

static inline void psi_write_begin(int cpu)
{
	write_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
}

static inline void psi_write_end(int cpu)
{
	write_seqcount_end(per_cpu_ptr(&psi_seq, cpu));
}

static inline u32 psi_read_begin(int cpu)
{
	return read_seqcount_begin(per_cpu_ptr(&psi_seq, cpu));
}

static inline bool psi_read_retry(int cpu, u32 seq)
{
	return read_seqcount_retry(per_cpu_ptr(&psi_seq, cpu), seq);
}

static void psi_avgs_work(struct work_struct *work);

static void poll_timer_fn(struct timer_list *t);

static void group_init(struct psi_group *group)
{
	group->enabled = true;
	group->avg_last_update = sched_clock();
	group->avg_next_update = group->avg_last_update + psi_period;
	mutex_init(&group->avgs_lock);

	/* Init avg trigger-related members */
	INIT_LIST_HEAD(&group->avg_triggers);
	memset(group->avg_nr_triggers, 0, sizeof(group->avg_nr_triggers));
	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);

	/* Init rtpoll trigger-related members */
	atomic_set(&group->rtpoll_scheduled, 0);
	mutex_init(&group->rtpoll_trigger_lock);
	INIT_LIST_HEAD(&group->rtpoll_triggers);
	group->rtpoll_min_period = U32_MAX;
	group->rtpoll_next_update = ULLONG_MAX;
	init_waitqueue_head(&group->rtpoll_wait);
	timer_setup(&group->rtpoll_timer, poll_timer_fn, 0);
	rcu_assign_pointer(group->rtpoll_task, NULL);
}

void __init psi_init(void)
{
	if (!psi_enable) {
		static_branch_enable(&psi_disabled);
		static_branch_disable(&psi_cgroups_enabled);
		return;
	}

	if (!cgroup_psi_enabled())
		static_branch_disable(&psi_cgroups_enabled);

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}

static u32 test_states(unsigned int *tasks, u32 state_mask)
{
	const bool oncpu = state_mask & PSI_ONCPU;

	if (tasks[NR_IOWAIT]) {
		state_mask |= BIT(PSI_IO_SOME);
		if (!tasks[NR_RUNNING])
			state_mask |= BIT(PSI_IO_FULL);
	}

	if (tasks[NR_MEMSTALL]) {
		state_mask |= BIT(PSI_MEM_SOME);
		if (tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING])
			state_mask |= BIT(PSI_MEM_FULL);
	}

	if (tasks[NR_RUNNING] > oncpu)
		state_mask |= BIT(PSI_CPU_SOME);

	if (tasks[NR_RUNNING] && !oncpu)
		state_mask |= BIT(PSI_CPU_FULL);

	if (tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || tasks[NR_RUNNING])
		state_mask |= BIT(PSI_NONIDLE);

	return state_mask;
}
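
/*
 * Two illustrative examples of the rules above, with hypothetical
 * counts: one task on the CPU (PSI_ONCPU set) plus one more runnable
 * task gives tasks[NR_RUNNING] == 2 > oncpu, i.e. PSI_CPU_SOME but
 * not PSI_CPU_FULL; only iowaiters left (tasks[NR_IOWAIT] != 0,
 * tasks[NR_RUNNING] == 0) gives both PSI_IO_SOME and PSI_IO_FULL.
 */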

static void get_recent_times(struct psi_group *group, int cpu,
			     enum psi_aggregators aggregator, u32 *times,
			     u32 *pchanged_states)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	int current_cpu = raw_smp_processor_id();
	unsigned int tasks[NR_PSI_TASK_COUNTS];
	u64 now, state_start;
	enum psi_states s;
	unsigned int seq;
	u32 state_mask;

	*pchanged_states = 0;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = psi_read_begin(cpu);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		state_mask = groupc->state_mask;
		state_start = groupc->state_start;
		if (cpu == current_cpu)
			memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
	} while (psi_read_retry(cpu, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (state_mask & (1 << s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[aggregator][s];
		groupc->times_prev[aggregator][s] = times[s];

		times[s] = delta;
		if (delta)
			*pchanged_states |= (1 << s);
	}

	/*
	 * When collect_percpu_times() is called from the avgs_work, we
	 * don't want to re-arm avgs_work when all CPUs are IDLE. But the
	 * CPU running this avgs_work is never IDLE, because avgs_work
	 * can't be shut off. So for the current CPU we need to re-arm
	 * avgs_work only when
	 * (NR_RUNNING > 1 || NR_IOWAIT > 0 || NR_MEMSTALL > 0); for
	 * other CPUs we can just check the PSI_NONIDLE delta.
	 */
	if (current_work() == &group->avgs_work.work) {
		bool reschedule;

		if (cpu == current_cpu)
			reschedule = tasks[NR_RUNNING] +
				     tasks[NR_IOWAIT] +
				     tasks[NR_MEMSTALL] > 1;
		else
			reschedule = *pchanged_states & (1 << PSI_NONIDLE);

		if (reschedule)
			*pchanged_states |= PSI_STATE_RESCHEDULE;
	}
}

static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}
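
/*
 * Illustrative numbers: calc_load(avg, exp, pct) computes
 * (avg * exp + pct * (FIXED_1 - exp)) / FIXED_1. Starting from
 * avg[0] == 0, one period at pct == 50% moves the 10s average to
 * 50% * (2048 - 1677) / 2048 ~= 9.1%, and it converges toward 50%
 * if that pressure persists.
 */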

static void collect_percpu_times(struct psi_group *group,
				 enum psi_aggregators aggregator,
				 u32 *pchanged_states)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long nonidle_total = 0;
	u32 changed_states = 0;
	int cpu;
	int s;

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wall clock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;
		u32 cpu_changed_states;

		get_recent_times(group, cpu, aggregator, times,
				&cpu_changed_states);
		changed_states |= cpu_changed_states;

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[aggregator][s] +=
				div_u64(deltas[s], max(nonidle_total, 1UL));

	if (pchanged_states)
		*pchanged_states = changed_states;
}

/* Trigger tracking window manipulations */
static void window_reset(struct psi_window *win, u64 now, u64 value,
			 u64 prev_growth)
{
	win->start_time = now;
	win->start_value = value;
	win->prev_growth = prev_growth;
}

/*
 * PSI growth tracking window update and growth calculation routine.
 *
 * This approximates a sliding tracking window by interpolating
 * partially elapsed windows using historical growth data from the
 * previous intervals. This minimizes memory requirements (by not storing
 * all the intermediate values of the previous window) and simplifies
 * the calculations. It works well because the PSI signal changes only
 * in the positive direction, and over relatively small window sizes
 * the growth is close to linear.
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
	u64 elapsed;
	u64 growth;

	elapsed = now - win->start_time;
	growth = value - win->start_value;
	/*
	 * After each tracking window passes, win->start_value and
	 * win->start_time get reset and win->prev_growth stores
	 * the average per-window growth of the previous window.
	 * win->prev_growth is then used to interpolate additional
	 * growth from the previous window assuming it was linear.
	 */
	if (elapsed > win->size)
		window_reset(win, now, value, growth);
	else {
		u32 remaining;

		remaining = win->size - elapsed;
		growth += div64_u64(win->prev_growth * remaining, win->size);
	}

	return growth;
}
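
/*
 * Illustrative numbers: with a 1s window, 400ms elapsed since the
 * last window_reset(), 30ms of growth since then and 100ms of growth
 * in the previous window, the reported growth is
 *
 *	30ms + 100ms * (1s - 400ms) / 1s = 90ms
 *
 * i.e. the partial window is topped up with a linear share of the
 * previous window's growth.
 */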

static void update_triggers(struct psi_group *group, u64 now,
			    enum psi_aggregators aggregator)
{
	struct psi_trigger *t;
	u64 *total = group->total[aggregator];
	struct list_head *triggers;
	u64 *aggregator_total;

	if (aggregator == PSI_AVGS) {
		triggers = &group->avg_triggers;
		aggregator_total = group->avg_total;
	} else {
		triggers = &group->rtpoll_triggers;
		aggregator_total = group->rtpoll_total;
	}

	/*
	 * On subsequent updates, calculate growth deltas and let
	 * watchers know when their specified thresholds are exceeded.
	 */
	list_for_each_entry(t, triggers, node) {
		u64 growth;
		bool new_stall;

		new_stall = aggregator_total[t->state] != total[t->state];

		/* Check for stall activity or a previous threshold breach */
		if (!new_stall && !t->pending_event)
			continue;
		/*
		 * Check for new stall activity, as well as deferred
		 * events that occurred in the last window after the
		 * trigger had already fired (we want to ratelimit
		 * events without dropping any).
		 */
		if (new_stall) {
			/* Calculate growth since last update */
			growth = window_update(&t->win, now, total[t->state]);
			if (!t->pending_event) {
				if (growth < t->threshold)
					continue;

				t->pending_event = true;
			}
		}
		/* Limit event signaling to once per window */
		if (now < t->last_event_time + t->win.size)
			continue;

		/* Generate an event */
		if (cmpxchg(&t->event, 0, 1) == 0) {
			if (t->of)
				kernfs_notify(t->of->kn);
			else
				wake_up_interruptible(&t->event_wait);
		}
		t->last_event_time = now;
		/* Reset threshold breach flag once event got generated */
		t->pending_event = false;
	}
}
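
/*
 * For example, a hypothetical trigger written as "some 100000 1000000"
 * to a memory pressure file arms t->threshold == 100ms of stall time
 * over a t->win.size == 1s window: once window_update() reports at
 * least 100ms of growth, pending_event is set, and at most one event
 * per window is delivered to the waiting pollers.
 */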

static u64 update_averages(struct psi_group *group, u64 now)
{
	unsigned long missed_periods = 0;
	u64 expires, period;
	u64 avg_next_update;
	int s;

	/* avgX= */
	expires = group->avg_next_update;
	if (now - expires >= psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	avg_next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->avg_last_update + (missed_periods * psi_period));
	group->avg_last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->avg_total[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}

	return avg_next_update;
}

static void psi_avgs_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	u32 changed_states;
	u64 now;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, avgs_work);

	mutex_lock(&group->avgs_lock);

	now = sched_clock();

	collect_percpu_times(group, PSI_AVGS, &changed_states);
	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */
	if (now >= group->avg_next_update) {
		update_triggers(group, now, PSI_AVGS);
		group->avg_next_update = update_averages(group, now);
	}

	if (changed_states & PSI_STATE_RESCHEDULE) {
		schedule_delayed_work(dwork, nsecs_to_jiffies(
				group->avg_next_update - now) + 1);
	}

	mutex_unlock(&group->avgs_lock);
}

static void init_rtpoll_triggers(struct psi_group *group, u64 now)
{
	struct psi_trigger *t;

	list_for_each_entry(t, &group->rtpoll_triggers, node)
		window_reset(&t->win, now,
				group->total[PSI_POLL][t->state], 0);
	memcpy(group->rtpoll_total, group->total[PSI_POLL],
		   sizeof(group->rtpoll_total));
	group->rtpoll_next_update = now + group->rtpoll_min_period;
}

/* Schedule rtpolling if it's not already scheduled, or if forced. */
static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long delay,
				     bool force)
{
	struct task_struct *task;

	/*
	 * atomic_xchg should be called even when !force to provide a
	 * full memory barrier (see the comment inside psi_rtpoll_work).
	 */
	if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force)
		return;

	rcu_read_lock();

	task = rcu_dereference(group->rtpoll_task);
	/*
	 * kworker might be NULL in case psi_trigger_destroy races with
	 * psi_task_change (hotpath) which can't use locks
	 */
	if (likely(task))
		mod_timer(&group->rtpoll_timer, jiffies + delay);
	else
		atomic_set(&group->rtpoll_scheduled, 0);

	rcu_read_unlock();
}

static void psi_rtpoll_work(struct psi_group *group)
{
	bool force_reschedule = false;
	u32 changed_states;
	u64 now;

	mutex_lock(&group->rtpoll_trigger_lock);

	now = sched_clock();

	if (now > group->rtpoll_until) {
		/*
		 * We are either about to start or might stop rtpolling if no
		 * state change was recorded. Resetting rtpoll_scheduled leaves
		 * a small window for psi_group_change to sneak in and schedule
		 * an immediate rtpoll_work before we get to rescheduling. One
		 * potential extra wakeup at the end of the rtpolling window
		 * should be negligible and rtpoll_next_update still keeps
		 * updates correctly on schedule.
		 */
		atomic_set(&group->rtpoll_scheduled, 0);
		/*
		 * A task change can race with the rtpoll worker that is supposed to
		 * report on it. To avoid missing events, ensure ordering between
		 * rtpoll_scheduled and the task state accesses, such that if the
		 * rtpoll worker misses the state update, the task change is
		 * guaranteed to reschedule the rtpoll worker:
		 *
		 * rtpoll worker:
		 *   atomic_set(rtpoll_scheduled, 0)
		 *   smp_mb()
		 *   LOAD states
		 *
		 * task change:
		 *   STORE states
		 *   if atomic_xchg(rtpoll_scheduled, 1) == 0:
		 *     schedule rtpoll worker
		 *
		 * The atomic_xchg() implies a full barrier.
		 */
		smp_mb();
	} else {
		/* The rtpolling window is not over, keep rescheduling */
		force_reschedule = true;
	}

	collect_percpu_times(group, PSI_POLL, &changed_states);

	if (changed_states & group->rtpoll_states) {
		/* Initialize trigger windows when entering rtpolling mode */
		if (now > group->rtpoll_until)
			init_rtpoll_triggers(group, now);

		/*
		 * Keep the monitor active for at least the duration of the
		 * minimum tracking window as long as monitor states are
		 * changing.
		 */
		group->rtpoll_until = now +
			group->rtpoll_min_period * UPDATES_PER_WINDOW;
	}

	if (now > group->rtpoll_until) {
		group->rtpoll_next_update = ULLONG_MAX;
		goto out;
	}

	if (now >= group->rtpoll_next_update) {
		if (changed_states & group->rtpoll_states) {
			update_triggers(group, now, PSI_POLL);
			memcpy(group->rtpoll_total, group->total[PSI_POLL],
				   sizeof(group->rtpoll_total));
		}
		group->rtpoll_next_update = now + group->rtpoll_min_period;
	}

	psi_schedule_rtpoll_work(group,
		nsecs_to_jiffies(group->rtpoll_next_update - now) + 1,
		force_reschedule);

out:
	mutex_unlock(&group->rtpoll_trigger_lock);
}

static int psi_rtpoll_worker(void *data)
{
	struct psi_group *group = (struct psi_group *)data;

	sched_set_fifo_low(current);

	while (true) {
		wait_event_interruptible(group->rtpoll_wait,
				atomic_cmpxchg(&group->rtpoll_wakeup, 1, 0) ||
				kthread_should_stop());
		if (kthread_should_stop())
			break;

		psi_rtpoll_work(group);
	}
	return 0;
}

static void poll_timer_fn(struct timer_list *t)
{
	struct psi_group *group = timer_container_of(group, t, rtpoll_timer);

	atomic_set(&group->rtpoll_wakeup, 1);
	wake_up_interruptible(&group->rtpoll_wait);
}

static void record_times(struct psi_group_cpu *groupc, u64 now)
{
	u32 delta;

	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
		groupc->times[PSI_CPU_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_CPU_FULL))
			groupc->times[PSI_CPU_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}

#define for_each_group(iter, group) \
	for (typeof(group) iter = group; iter; iter = iter->parent)

static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set,
			     u64 now, bool wake_clock)
{
	struct psi_group_cpu *groupc;
	unsigned int t, m;
	u32 state_mask;

	lockdep_assert_rq_held(cpu_rq(cpu));
	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * Start with TSK_ONCPU, which doesn't have a corresponding
	 * task count - it's just a boolean flag directly encoded in
	 * the state mask. Clear, set, or carry the current state if
	 * no changes are requested.
	 */
	if (unlikely(clear & TSK_ONCPU)) {
		state_mask = 0;
		clear &= ~TSK_ONCPU;
	} else if (unlikely(set & TSK_ONCPU)) {
		state_mask = PSI_ONCPU;
		set &= ~TSK_ONCPU;
	} else {
		state_mask = groupc->state_mask & PSI_ONCPU;
	}

	/*
	 * The rest of the state mask is calculated based on the task
	 * counts. Update those first, then construct the mask.
	 */
	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t]) {
			groupc->tasks[t]--;
		} else if (!psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					groupc->tasks[3], clear, set);
			psi_bug = 1;
		}
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	if (!group->enabled) {
		/*
		 * On the first group change after disabling PSI, conclude
		 * the current state and flush its time. This is unlikely
		 * to matter to the user, but aggregation (get_recent_times)
		 * may have already incorporated the live state into times_prev;
		 * avoid a delta sample underflow when PSI is later re-enabled.
		 */
		if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
			record_times(groupc, now);

		groupc->state_mask = state_mask;

		return;
	}

	state_mask = test_states(groupc->tasks, state_mask);

	/*
	 * Since we care about lost potential, a memstall is FULL
	 * when there are no other working tasks, but also when
	 * the CPU is actively reclaiming and nothing productive
	 * could run even if it were runnable. So when the current
	 * task in a cgroup is in_memstall, the corresponding groupc
	 * on that cpu is in PSI_MEM_FULL state.
	 */
	if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
		state_mask |= (1 << PSI_MEM_FULL);

	record_times(groupc, now);

	groupc->state_mask = state_mask;

	if (state_mask & group->rtpoll_states)
		psi_schedule_rtpoll_work(group, 1, false);

	if (wake_clock && !delayed_work_pending(&group->avgs_work))
		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}

static inline struct psi_group *task_psi_group(struct task_struct *task)
{
#ifdef CONFIG_CGROUPS
	if (static_branch_likely(&psi_cgroups_enabled))
		return cgroup_psi(task_dfl_cgroup(task));
#endif
	return &psi_system;
}

static void psi_flags_change(struct task_struct *task, int clear, int set)
{
	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, task_cpu(task),
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	u64 now;

	if (!task->pid)
		return;

	psi_flags_change(task, clear, set);

	psi_write_begin(cpu);
	now = cpu_clock(cpu);
	for_each_group(group, task_psi_group(task))
		psi_group_change(group, cpu, clear, set, now, true);
	psi_write_end(cpu);
}

void psi_task_switch(struct task_struct *prev, struct task_struct *next,
		     bool sleep)
{
	struct psi_group *common = NULL;
	int cpu = task_cpu(prev);
	u64 now;

	psi_write_begin(cpu);
	now = cpu_clock(cpu);

	if (next->pid) {
		psi_flags_change(next, 0, TSK_ONCPU);
		/*
		 * Set TSK_ONCPU on @next's cgroups. If @next shares any
		 * ancestors with @prev, those will already have @prev's
		 * TSK_ONCPU bit set, and we can stop the iteration there.
		 */
		for_each_group(group, task_psi_group(next)) {
			struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);

			if (groupc->state_mask & PSI_ONCPU) {
				common = group;
				break;
			}
			psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
		}
	}

	if (prev->pid) {
		int clear = TSK_ONCPU, set = 0;
		bool wake_clock = true;

		/*
		 * When we're going to sleep, psi_dequeue() lets us
		 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
		 * TSK_IOWAIT here, where we can combine it with
		 * TSK_ONCPU and save walking common ancestors twice.
		 */
		if (sleep) {
			clear |= TSK_RUNNING;
			if (prev->in_memstall)
				clear |= TSK_MEMSTALL_RUNNING;
			if (prev->in_iowait)
				set |= TSK_IOWAIT;

			/*
			 * Periodic aggregation shuts off if there is a period of no
			 * task changes, so we wake it back up if necessary. However,
			 * don't do this if the task change is the aggregation worker
			 * itself going to sleep, or we'll ping-pong forever.
			 */
			if (unlikely((prev->flags & PF_WQ_WORKER) &&
				     wq_worker_last_func(prev) == psi_avgs_work))
				wake_clock = false;
		}

		psi_flags_change(prev, clear, set);

		for_each_group(group, task_psi_group(prev)) {
			if (group == common)
				break;
			psi_group_change(group, cpu, clear, set, now, wake_clock);
		}

		/*
		 * TSK_ONCPU is handled up to the common ancestor. If there are
		 * any other differences between the two tasks (e.g. prev goes
		 * to sleep, or only one task is memstall), finish propagating
		 * those differences all the way up to the root.
		 */
		if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
			clear &= ~TSK_ONCPU;
			for_each_group(group, common)
				psi_group_change(group, cpu, clear, set, now, wake_clock);
		}
	}
	psi_write_end(cpu);
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
{
	int cpu = task_cpu(curr);
	struct psi_group_cpu *groupc;
	s64 delta;
	u64 irq;
	u64 now;

	if (static_branch_likely(&psi_disabled) || !irqtime_enabled())
		return;

	if (!curr->pid)
		return;

	lockdep_assert_rq_held(rq);
	if (prev && task_psi_group(prev) == task_psi_group(curr))
		return;

	irq = irq_time_read(cpu);
	delta = (s64)(irq - rq->psi_irq_time);
	if (delta < 0)
		return;
	rq->psi_irq_time = irq;

	psi_write_begin(cpu);
	now = cpu_clock(cpu);

	for_each_group(group, task_psi_group(curr)) {
		if (!group->enabled)
			continue;

		groupc = per_cpu_ptr(group->pcpu, cpu);

		record_times(groupc, now);
		groupc->times[PSI_IRQ_FULL] += delta;

		if (group->rtpoll_states & (1 << PSI_IRQ_FULL))
			psi_schedule_rtpoll_work(group, 1, false);
	}
	psi_write_end(cpu);
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	*flags = current->in_memstall;
	if (*flags)
		return;
	/*
	 * in_memstall setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 1;
	psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	rq_unlock_irq(rq, &rf);
}
EXPORT_SYMBOL_GPL(psi_memstall_enter);

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	if (*flags)
		return;
	/*
	 * in_memstall clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 0;
	psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);

	rq_unlock_irq(rq, &rf);
}
EXPORT_SYMBOL_GPL(psi_memstall_leave);
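
/*
 * A minimal usage sketch for the pair above (the stalled section in
 * the middle stands in for whatever the caller is accounting, e.g.
 * waiting for refault IO or doing direct reclaim):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	... perform the memory-stalled work ...
 *	psi_memstall_leave(&pflags);
 *
 * The flags word makes nesting safe: only the outermost pair actually
 * changes the task state.
 */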

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
	if (!static_branch_likely(&psi_cgroups_enabled))
		return 0;

	cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
	if (!cgroup->psi)
		return -ENOMEM;

	cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
	if (!cgroup->psi->pcpu) {
		kfree(cgroup->psi);
		return -ENOMEM;
	}
	group_init(cgroup->psi);
	cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
	return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
	if (!static_branch_likely(&psi_cgroups_enabled))
		return;

	cancel_delayed_work_sync(&cgroup->psi->avgs_work);
	free_percpu(cgroup->psi->pcpu);
	/* All triggers must be removed by now */
	WARN_ONCE(cgroup->psi->rtpoll_states, "psi: trigger leak\n");
	kfree(cgroup->psi);
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags;
	struct rq_flags rf;
	struct rq *rq;

	if (!static_branch_likely(&psi_cgroups_enabled)) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
	}

	rq = task_rq_lock(task, &rf);

	/*
	 * We may race with schedule() dropping the rq lock between
	 * deactivating prev and switching to next. Because the psi
	 * updates from the deactivation are deferred to the switch
	 * callback to save cgroup tree updates, the task's scheduling
	 * state here is not coherent with its psi state:
	 *
	 * schedule()                   cgroup_move_task()
	 *   rq_lock()
	 *   deactivate_task()
	 *     p->on_rq = 0
	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
	 *   pick_next_task()
	 *     rq_unlock()
	 *                                rq_lock()
	 *                                psi_task_change() // old cgroup
	 *                                task->cgroups = to
	 *                                psi_task_change() // new cgroup
	 *                                rq_unlock()
	 *     rq_lock()
	 *   psi_sched_switch() // does deferred updates in new cgroup
	 *
	 * Don't rely on the scheduling state. Use psi_flags instead.
	 */
	task_flags = task->psi_flags;

	if (task_flags)
		psi_task_change(task, task_flags, 0);

	/* See comment above */
	rcu_assign_pointer(task->cgroups, to);

	if (task_flags)
		psi_task_change(task, 0, task_flags);

	task_rq_unlock(rq, task, &rf);
}

void psi_cgroup_restart(struct psi_group *group)
{
	int cpu;

	/*
	 * After we disable psi_group->enabled, we don't actually
	 * stop percpu task accounting in each psi_group_cpu;
	 * we only stop the test_states() loop, record_times()
	 * and the averaging worker, see psi_group_change() for details.
	 *
	 * When disabling cgroup PSI, this function has nothing to sync
	 * since cgroup pressure files are hidden and percpu psi_group_cpu
	 * would see !psi_group->enabled and only do task accounting.
	 *
	 * When re-enabling cgroup PSI, this function uses psi_group_change()
	 * to get the correct state mask from the test_states() loop on
	 * tasks[], and restarts groupc->state_start from now. We use
	 * .clear = .set = 0 here since no task state really changed.
	 */
	if (!group->enabled)
		return;

	for_each_possible_cpu(cpu) {
		u64 now;

		guard(rq_lock_irq)(cpu_rq(cpu));

		psi_write_begin(cpu);
		now = cpu_clock(cpu);
		psi_group_change(group, cpu, 0, 0, now, true);
		psi_write_end(cpu);
	}
}
#endif /* CONFIG_CGROUPS */

int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	bool only_full = false;
	int full;
	u64 now;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	if (!irqtime_enabled() && res == PSI_IRQ)
		return -EOPNOTSUPP;
#endif

	/* Update averages before reporting them */
	mutex_lock(&group->avgs_lock);
	now = sched_clock();
	collect_percpu_times(group, PSI_AVGS, NULL);
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);
	mutex_unlock(&group->avgs_lock);

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	only_full = res == PSI_IRQ;
#endif

	for (full = 0; full < 2 - only_full; full++) {
		unsigned long avg[3] = { 0, };
		u64 total = 0;
		int w;

		/* CPU FULL is undefined at the system level */
		if (!(group == &psi_system && res == PSI_CPU && full)) {
			for (w = 0; w < 3; w++)
				avg[w] = group->avg[res * 2 + full][w];
			total = div_u64(group->total[PSI_AVGS][res * 2 + full],
					NSEC_PER_USEC);
		}

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full || only_full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}

struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
				       enum psi_res res, struct file *file,
				       struct kernfs_open_file *of)
{
	struct psi_trigger *t;
	enum psi_states state;
	u32 threshold_us;
	bool privileged;
	u32 window_us;

	if (static_branch_likely(&psi_disabled))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * Checking the privilege here on file->f_cred implies that a privileged user
	 * could open the file and delegate the write to an unprivileged one.
	 */
	privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE);

	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_SOME + res * 2;
	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_FULL + res * 2;
	else
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	if (res == PSI_IRQ && --state != PSI_IRQ_FULL)
		return ERR_PTR(-EINVAL);
#endif

	if (state >= PSI_NONIDLE)
		return ERR_PTR(-EINVAL);

	if (window_us == 0 || window_us > WINDOW_MAX_US)
		return ERR_PTR(-EINVAL);

	/*
	 * Unprivileged users can only use 2s windows so that averages aggregation
	 * work is used, and no RT threads need to be spawned.
	 */
	if (!privileged && window_us % 2000000)
		return ERR_PTR(-EINVAL);

	/* Check threshold */
	if (threshold_us == 0 || threshold_us > window_us)
		return ERR_PTR(-EINVAL);

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->group = group;
	t->state = state;
	t->threshold = threshold_us * NSEC_PER_USEC;
	t->win.size = window_us * NSEC_PER_USEC;
	window_reset(&t->win, sched_clock(),
			group->total[PSI_POLL][t->state], 0);

	t->event = 0;
	t->last_event_time = 0;
	t->of = of;
	if (!of)
		init_waitqueue_head(&t->event_wait);
	t->pending_event = false;
	t->aggregator = privileged ? PSI_POLL : PSI_AVGS;

	if (privileged) {
		mutex_lock(&group->rtpoll_trigger_lock);

		if (!rcu_access_pointer(group->rtpoll_task)) {
			struct task_struct *task;

			task = kthread_create(psi_rtpoll_worker, group, "psimon");
			if (IS_ERR(task)) {
				kfree(t);
				mutex_unlock(&group->rtpoll_trigger_lock);
				return ERR_CAST(task);
			}
			atomic_set(&group->rtpoll_wakeup, 0);
			wake_up_process(task);
			rcu_assign_pointer(group->rtpoll_task, task);
		}

		list_add(&t->node, &group->rtpoll_triggers);
		group->rtpoll_min_period = min(group->rtpoll_min_period,
			div_u64(t->win.size, UPDATES_PER_WINDOW));
		group->rtpoll_nr_triggers[t->state]++;
		group->rtpoll_states |= (1 << t->state);

		mutex_unlock(&group->rtpoll_trigger_lock);
	} else {
		mutex_lock(&group->avgs_lock);

		list_add(&t->node, &group->avg_triggers);
		group->avg_nr_triggers[t->state]++;

		mutex_unlock(&group->avgs_lock);
	}
	return t;
}
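
/*
 * For instance (hypothetical values), a privileged trigger with a
 * 500ms window is polled every 500ms / UPDATES_PER_WINDOW == 50ms;
 * group->rtpoll_min_period tracks the smallest such period across the
 * group's rtpoll triggers.
 */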

void psi_trigger_destroy(struct psi_trigger *t)
{
	struct psi_group *group;
	struct task_struct *task_to_destroy = NULL;

	/*
	 * We do not check psi_disabled since it might have been disabled after
	 * the trigger got created.
	 */
	if (!t)
		return;

	group = t->group;
	/*
	 * Wakeup waiters to stop polling and clear the queue to prevent it from
	 * being accessed later. Can happen if cgroup is deleted from under a
	 * polling process.
	 */
	if (t->of)
		kernfs_notify(t->of->kn);
	else
		wake_up_interruptible(&t->event_wait);

	if (t->aggregator == PSI_AVGS) {
		mutex_lock(&group->avgs_lock);
		if (!list_empty(&t->node)) {
			list_del(&t->node);
			group->avg_nr_triggers[t->state]--;
		}
		mutex_unlock(&group->avgs_lock);
	} else {
		mutex_lock(&group->rtpoll_trigger_lock);
		if (!list_empty(&t->node)) {
			struct psi_trigger *tmp;
			u64 period = ULLONG_MAX;

			list_del(&t->node);
			group->rtpoll_nr_triggers[t->state]--;
			if (!group->rtpoll_nr_triggers[t->state])
				group->rtpoll_states &= ~(1 << t->state);
			/*
			 * Reset min update period for the remaining triggers
			 * iff the destroying trigger had the min window size.
			 */
			if (group->rtpoll_min_period == div_u64(t->win.size, UPDATES_PER_WINDOW)) {
				list_for_each_entry(tmp, &group->rtpoll_triggers, node)
					period = min(period, div_u64(tmp->win.size,
							UPDATES_PER_WINDOW));
				group->rtpoll_min_period = period;
			}
			/* Destroy rtpoll_task when the last trigger is destroyed */
			if (group->rtpoll_states == 0) {
				group->rtpoll_until = 0;
				task_to_destroy = rcu_dereference_protected(
						group->rtpoll_task,
						lockdep_is_held(&group->rtpoll_trigger_lock));
				rcu_assign_pointer(group->rtpoll_task, NULL);
				timer_delete(&group->rtpoll_timer);
			}
		}
		mutex_unlock(&group->rtpoll_trigger_lock);
	}

	/*
	 * Wait for psi_schedule_rtpoll_work RCU to complete its read-side
	 * critical section before destroying the trigger and optionally the
	 * rtpoll_task.
	 */
	synchronize_rcu();
	/*
	 * Stop kthread 'psimon' after releasing rtpoll_trigger_lock to prevent
	 * a deadlock while waiting for psi_rtpoll_work to acquire
	 * rtpoll_trigger_lock
	 */
	if (task_to_destroy) {
		/*
		 * After the RCU grace period has expired, the worker
		 * can no longer be found through group->rtpoll_task.
		 */
		kthread_stop(task_to_destroy);
		atomic_set(&group->rtpoll_scheduled, 0);
	}
	kfree(t);
}

__poll_t psi_trigger_poll(void **trigger_ptr,
				struct file *file, poll_table *wait)
{
	__poll_t ret = DEFAULT_POLLMASK;
	struct psi_trigger *t;

	if (static_branch_likely(&psi_disabled))
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	t = smp_load_acquire(trigger_ptr);
	if (!t)
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	if (t->of)
		kernfs_generic_poll(t->of, wait);
	else
		poll_wait(file, &t->event_wait, wait);

	if (cmpxchg(&t->event, 1, 0) == 1)
		ret |= EPOLLPRI;

	return ret;
}
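
/*
 * Userspace monitor sketch against the proc interface below; this
 * follows the example in Documentation/accounting/psi.rst, with
 * handle_memory_pressure_event() being a hypothetical callback:
 *
 *	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	write(fd, "some 150000 1000000", 19);
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
 *		handle_memory_pressure_event();
 */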

#ifdef CONFIG_PROC_FS
static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_cpu_show, NULL);
}

static ssize_t psi_write(struct file *file, const char __user *user_buf,
			 size_t nbytes, enum psi_res res)
{
	char buf[32];
	size_t buf_size;
	struct seq_file *seq;
	struct psi_trigger *new;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	if (!nbytes)
		return -EINVAL;

	buf_size = min(nbytes, sizeof(buf));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size - 1] = '\0';

	seq = file->private_data;

	/* Take seq->lock to protect seq->private from concurrent writes */
	mutex_lock(&seq->lock);

	/* Allow only one trigger per file descriptor */
	if (seq->private) {
		mutex_unlock(&seq->lock);
		return -EBUSY;
	}

	new = psi_trigger_create(&psi_system, buf, res, file, NULL);
	if (IS_ERR(new)) {
		mutex_unlock(&seq->lock);
		return PTR_ERR(new);
	}

	smp_store_release(&seq->private, new);
	mutex_unlock(&seq->lock);

	return nbytes;
}

static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
			    size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_IO);
}

static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
				size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_MEM);
}

static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
			     size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_CPU);
}

static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
{
	struct seq_file *seq = file->private_data;

	return psi_trigger_poll(&seq->private, file, wait);
}

static int psi_fop_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	psi_trigger_destroy(seq->private);
	return single_release(inode, file);
}

static const struct proc_ops psi_io_proc_ops = {
	.proc_open	= psi_io_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_io_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_memory_proc_ops = {
	.proc_open	= psi_memory_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_memory_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_cpu_proc_ops = {
	.proc_open	= psi_cpu_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_cpu_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
static int psi_irq_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IRQ);
}

static int psi_irq_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_irq_show, NULL);
}

static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
			     size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_IRQ);
}

static const struct proc_ops psi_irq_proc_ops = {
	.proc_open	= psi_irq_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_irq_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

static int __init psi_proc_init(void)
{
	if (psi_enable) {
		proc_mkdir("pressure", NULL);
		proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
		proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
		proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
		proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops);
#endif
	}
	return 0;
}
module_init(psi_proc_init);

#endif /* CONFIG_PROC_FS */
1683