xref: /linux/kernel/sched/psi.c (revision 2dc73b48665411a08c4e5f0f823dea8510761603)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Pressure stall information for CPU, memory and IO
4  *
5  * Copyright (c) 2018 Facebook, Inc.
6  * Author: Johannes Weiner <hannes@cmpxchg.org>
7  *
8  * Polling support by Suren Baghdasaryan <surenb@google.com>
9  * Copyright (c) 2018 Google, Inc.
10  *
11  * When CPU, memory and IO are contended, tasks experience delays that
12  * reduce throughput and introduce latencies into the workload. Memory
13  * and IO contention, in addition, can cause a full loss of forward
14  * progress in which the CPU goes idle.
15  *
16  * This code aggregates individual task delays into resource pressure
17  * metrics that indicate problems with both workload health and
18  * resource utilization.
19  *
20  *			Model
21  *
22  * The time in which a task can execute on a CPU is our baseline for
23  * productivity. Pressure expresses the amount of time in which this
24  * potential cannot be realized due to resource contention.
25  *
26  * This concept of productivity has two components: the workload and
27  * the CPU. To measure the impact of pressure on both, we define two
28  * contention states for a resource: SOME and FULL.
29  *
30  * In the SOME state of a given resource, one or more tasks are
31  * delayed on that resource. This affects the workload's ability to
32  * perform work, but the CPU may still be executing other tasks.
33  *
34  * In the FULL state of a given resource, all non-idle tasks are
35  * delayed on that resource such that nobody is advancing and the CPU
36  * goes idle. This leaves both workload and CPU unproductive.
37  *
38  *	SOME = nr_delayed_tasks != 0
39  *	FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
40  *
41  * What it means for a task to be productive is defined differently
42  * for each resource. For IO, productive means a running task. For
43  * memory, productive means a running task that isn't a reclaimer. For
44  * CPU, productive means an oncpu task.
45  *
46  * Naturally, the FULL state doesn't exist for the CPU resource at the
47  * system level, but exists at the cgroup level. At the cgroup level,
48  * FULL means all non-idle tasks in the cgroup are delayed on the CPU
49  * resource which is being used by others outside of the cgroup or
50  * throttled by the cgroup cpu.max configuration.
51  *
52  * The percentage of wallclock time spent in those compound stall
53  * states gives pressure numbers between 0 and 100 for each resource,
54  * where the SOME percentage indicates workload slowdowns and the FULL
55  * percentage indicates reduced CPU utilization:
56  *
57  *	%SOME = time(SOME) / period
58  *	%FULL = time(FULL) / period
59  *
60  *			Multiple CPUs
61  *
62  * The more tasks and available CPUs there are, the more work can be
63  * performed concurrently. This means that the potential that can go
64  * unrealized due to resource contention *also* scales with non-idle
65  * tasks and CPUs.
66  *
67  * Consider a scenario where 257 number crunching tasks are trying to
68  * run concurrently on 256 CPUs. If we simply aggregated the task
69  * states, we would have to conclude a CPU SOME pressure number of
70  * 100%, since *somebody* is waiting on a runqueue at all
71  * times. However, that is clearly not the amount of contention the
72  * workload is experiencing: only one out of 256 possible execution
73  * threads will be contended at any given time, or about 0.4%.
74  *
75  * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
76  * given time *one* of the tasks is delayed due to a lack of memory.
77  * Again, looking purely at the task state would yield a memory FULL
78  * pressure number of 0%, since *somebody* is always making forward
79  * progress. But again this wouldn't capture the amount of execution
80  * potential lost, which is 1 out of 4 CPUs, or 25%.
81  *
82  * To calculate wasted potential (pressure) with multiple processors,
83  * we have to base our calculation on the number of non-idle tasks in
84  * conjunction with the number of available CPUs, which is the number
85  * of potential execution threads. SOME then becomes the proportion of
86  * delayed tasks to possible threads, and FULL is the share of possible
87  * threads that are unproductive due to delays:
88  *
89  *	threads = min(nr_nonidle_tasks, nr_cpus)
90  *	   SOME = min(nr_delayed_tasks / threads, 1)
91  *	   FULL = (threads - min(nr_productive_tasks, threads)) / threads
92  *
93  * For the 257 number crunchers on 256 CPUs, this yields:
94  *
95  *	threads = min(257, 256)
96  *	   SOME = min(1 / 256, 1)             = 0.4%
97  *	   FULL = (256 - min(256, 256)) / 256 = 0%
98  *
99  * For the 1 out of 4 memory-delayed tasks, this yields:
100  *
101  *	threads = min(4, 4)
102  *	   SOME = min(1 / 4, 1)               = 25%
103  *	   FULL = (4 - min(3, 4)) / 4         = 25%
104  *
105  * [ Substitute nr_cpus with 1, and you can see that it's a natural
106  *   extension of the single-CPU model. ]
107  *
108  *			Implementation
109  *
110  * To assess the precise time spent in each such state, we would have
111  * to freeze the system on task changes and start/stop the state
112  * clocks accordingly. Obviously that doesn't scale in practice.
113  *
114  * Because the scheduler aims to distribute the compute load evenly
115  * among the available CPUs, we can track task state locally to each
116  * CPU and, at much lower frequency, extrapolate the global state for
117  * the cumulative stall times and the running averages.
118  *
119  * For each runqueue, we track:
120  *
121  *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
122  *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
123  *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
124  *
125  * and then periodically aggregate:
126  *
127  *	tNONIDLE = sum(tNONIDLE[i])
128  *
129  *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
130  *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
131  *
132  *	   %SOME = tSOME / period
133  *	   %FULL = tFULL / period
134  *
135  * This gives us an approximation of pressure that is practical
136  * cost-wise, yet way more sensitive and accurate than periodic
137  * sampling of the aggregate task states would be.
138  */
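/*
 * [ Editorial illustration, not part of psi.c: a minimal userspace sketch
 *   of the SOME/FULL model above. It only evaluates the formulas from the
 *   "Multiple CPUs" section and reproduces the two worked examples; all
 *   names in it are made up for the illustration. ]
 */
#include <stdio.h>

static double min_d(double a, double b)
{
	return a < b ? a : b;
}

static void psi_model(double nonidle_tasks, double delayed_tasks,
		      double productive_tasks, double nr_cpus)
{
	double threads = min_d(nonidle_tasks, nr_cpus);
	double some = min_d(delayed_tasks / threads, 1.0);
	double full = (threads - min_d(productive_tasks, threads)) / threads;

	printf("SOME = %.1f%%  FULL = %.1f%%\n", some * 100.0, full * 100.0);
}

int main(void)
{
	psi_model(257, 1, 256, 256);	/* SOME = 0.4%   FULL = 0.0%  */
	psi_model(4, 1, 3, 4);		/* SOME = 25.0%  FULL = 25.0% */
	return 0;
}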
139 
140 static int psi_bug __read_mostly;
141 
142 DEFINE_STATIC_KEY_FALSE(psi_disabled);
143 DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
144 
145 #ifdef CONFIG_PSI_DEFAULT_DISABLED
146 static bool psi_enable;
147 #else
148 static bool psi_enable = true;
149 #endif
150 static int __init setup_psi(char *str)
151 {
152 	return kstrtobool(str, &psi_enable) == 0;
153 }
154 __setup("psi=", setup_psi);
155 
156 /* Running averages - we need to be higher-res than loadavg */
157 #define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
158 #define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
159 #define EXP_60s		1981		/* 1/exp(2s/60s) */
160 #define EXP_300s	2034		/* 1/exp(2s/300s) */
161 
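/*
 * [ Editorial illustration, not part of psi.c: a hedged sketch of where the
 *   EXP_* constants above come from, assuming the loadavg fixed-point "1.0"
 *   (FIXED_1) is 2048. Each constant is FIXED_1 / exp(2s/interval), rounded
 *   to the nearest integer. ]
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	const double fixed_1 = 2048.0;			/* assumed FIXED_1 */
	const double update_s = 2.0;			/* PSI_FREQ interval */
	const double intervals_s[] = { 10.0, 60.0, 300.0 };

	for (int i = 0; i < 3; i++)			/* 1677, 1981, 2034 */
		printf("EXP_%.0fs = %.0f\n", intervals_s[i],
		       fixed_1 * exp(-update_s / intervals_s[i]));
	return 0;
}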
162 /* PSI trigger definitions */
163 #define WINDOW_MIN_US 500000	/* Min window size is 500ms */
164 #define WINDOW_MAX_US 10000000	/* Max window size is 10s */
165 #define UPDATES_PER_WINDOW 10	/* 10 updates per window */
166 
167 /* Sampling frequency in nanoseconds */
168 static u64 psi_period __read_mostly;
169 
170 /* System-level pressure and stall tracking */
171 static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
172 struct psi_group psi_system = {
173 	.pcpu = &system_group_pcpu,
174 };
175 
176 static void psi_avgs_work(struct work_struct *work);
177 
178 static void poll_timer_fn(struct timer_list *t);
179 
180 static void group_init(struct psi_group *group)
181 {
182 	int cpu;
183 
184 	for_each_possible_cpu(cpu)
185 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
186 	group->avg_last_update = sched_clock();
187 	group->avg_next_update = group->avg_last_update + psi_period;
188 	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
189 	mutex_init(&group->avgs_lock);
190 	/* Init trigger-related members */
191 	mutex_init(&group->trigger_lock);
192 	INIT_LIST_HEAD(&group->triggers);
193 	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
194 	group->poll_states = 0;
195 	group->poll_min_period = U32_MAX;
196 	memset(group->polling_total, 0, sizeof(group->polling_total));
197 	group->polling_next_update = ULLONG_MAX;
198 	group->polling_until = 0;
199 	init_waitqueue_head(&group->poll_wait);
200 	timer_setup(&group->poll_timer, poll_timer_fn, 0);
201 	rcu_assign_pointer(group->poll_task, NULL);
202 }
203 
204 void __init psi_init(void)
205 {
206 	if (!psi_enable) {
207 		static_branch_enable(&psi_disabled);
208 		return;
209 	}
210 
211 	if (!cgroup_psi_enabled())
212 		static_branch_disable(&psi_cgroups_enabled);
213 
214 	psi_period = jiffies_to_nsecs(PSI_FREQ);
215 	group_init(&psi_system);
216 }
217 
218 static bool test_state(unsigned int *tasks, enum psi_states state)
219 {
220 	switch (state) {
221 	case PSI_IO_SOME:
222 		return unlikely(tasks[NR_IOWAIT]);
223 	case PSI_IO_FULL:
224 		return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
225 	case PSI_MEM_SOME:
226 		return unlikely(tasks[NR_MEMSTALL]);
227 	case PSI_MEM_FULL:
228 		return unlikely(tasks[NR_MEMSTALL] &&
229 			tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
230 	case PSI_CPU_SOME:
231 		return unlikely(tasks[NR_RUNNING] > tasks[NR_ONCPU]);
232 	case PSI_CPU_FULL:
233 		return unlikely(tasks[NR_RUNNING] && !tasks[NR_ONCPU]);
234 	case PSI_NONIDLE:
235 		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
236 			tasks[NR_RUNNING];
237 	default:
238 		return false;
239 	}
240 }
241 
242 static void get_recent_times(struct psi_group *group, int cpu,
243 			     enum psi_aggregators aggregator, u32 *times,
244 			     u32 *pchanged_states)
245 {
246 	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
247 	u64 now, state_start;
248 	enum psi_states s;
249 	unsigned int seq;
250 	u32 state_mask;
251 
252 	*pchanged_states = 0;
253 
254 	/* Snapshot a coherent view of the CPU state */
255 	do {
256 		seq = read_seqcount_begin(&groupc->seq);
257 		now = cpu_clock(cpu);
258 		memcpy(times, groupc->times, sizeof(groupc->times));
259 		state_mask = groupc->state_mask;
260 		state_start = groupc->state_start;
261 	} while (read_seqcount_retry(&groupc->seq, seq));
262 
263 	/* Calculate state time deltas against the previous snapshot */
264 	for (s = 0; s < NR_PSI_STATES; s++) {
265 		u32 delta;
266 		/*
267 		 * In addition to already concluded states, we also
268 		 * incorporate currently active states on the CPU,
269 		 * since states may last for many sampling periods.
270 		 *
271 		 * This way we keep our delta sampling buckets small
272 		 * (u32) and our reported pressure close to what's
273 		 * actually happening.
274 		 */
275 		if (state_mask & (1 << s))
276 			times[s] += now - state_start;
277 
278 		delta = times[s] - groupc->times_prev[aggregator][s];
279 		groupc->times_prev[aggregator][s] = times[s];
280 
281 		times[s] = delta;
282 		if (delta)
283 			*pchanged_states |= (1 << s);
284 	}
285 }
286 
287 static void calc_avgs(unsigned long avg[3], int missed_periods,
288 		      u64 time, u64 period)
289 {
290 	unsigned long pct;
291 
292 	/* Fill in zeroes for periods of no activity */
293 	if (missed_periods) {
294 		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
295 		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
296 		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
297 	}
298 
299 	/* Sample the most recent active period */
300 	pct = div_u64(time * 100, period);
301 	pct *= FIXED_1;
302 	avg[0] = calc_load(avg[0], EXP_10s, pct);
303 	avg[1] = calc_load(avg[1], EXP_60s, pct);
304 	avg[2] = calc_load(avg[2], EXP_300s, pct);
305 }
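/*
 * [ Editorial illustration, not part of psi.c: a simplified userspace model
 *   of the calc_load() fixed-point decay that calc_avgs() relies on. It
 *   shows a steady 25% pressure sample pulling avg10 up over successive 2s
 *   periods; the real in-kernel helper also rounds, which is omitted here. ]
 */
#include <stdio.h>

#define FIXED_1		2048UL	/* fixed-point 1.0, as in the loadavg code */
#define EXP_10S		1677UL	/* FIXED_1 / exp(2s/10s), see above */

static unsigned long ema(unsigned long avg, unsigned long exp,
			 unsigned long sample)
{
	/* avg = avg * decay + sample * (1 - decay), all in fixed point */
	return (avg * exp + sample * (FIXED_1 - exp)) / FIXED_1;
}

int main(void)
{
	unsigned long avg = 0;
	unsigned long sample = 25 * FIXED_1;	/* a steady 25% pressure sample */

	for (int period = 1; period <= 5; period++) {
		avg = ema(avg, EXP_10S, sample);
		printf("avg10 after %d periods: %lu.%02lu%%\n", period,
		       avg / FIXED_1, (avg % FIXED_1) * 100 / FIXED_1);
	}
	return 0;
}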
306 
307 static void collect_percpu_times(struct psi_group *group,
308 				 enum psi_aggregators aggregator,
309 				 u32 *pchanged_states)
310 {
311 	u64 deltas[NR_PSI_STATES - 1] = { 0, };
312 	unsigned long nonidle_total = 0;
313 	u32 changed_states = 0;
314 	int cpu;
315 	int s;
316 
317 	/*
318 	 * Collect the per-cpu time buckets and average them into a
319 	 * single time sample that is normalized to wallclock time.
320 	 *
321 	 * For averaging, each CPU is weighted by its non-idle time in
322 	 * the sampling period. This eliminates artifacts from uneven
323 	 * loading, or even entirely idle CPUs.
324 	 */
325 	for_each_possible_cpu(cpu) {
326 		u32 times[NR_PSI_STATES];
327 		u32 nonidle;
328 		u32 cpu_changed_states;
329 
330 		get_recent_times(group, cpu, aggregator, times,
331 				&cpu_changed_states);
332 		changed_states |= cpu_changed_states;
333 
334 		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
335 		nonidle_total += nonidle;
336 
337 		for (s = 0; s < PSI_NONIDLE; s++)
338 			deltas[s] += (u64)times[s] * nonidle;
339 	}
340 
341 	/*
342 	 * Integrate the sample into the running statistics that are
343 	 * reported to userspace: the cumulative stall times and the
344 	 * decaying averages.
345 	 *
346 	 * Pressure percentages are sampled at PSI_FREQ. We might be
347 	 * called more often when the user polls more frequently than
348 	 * that; we might be called less often when there is no task
349 	 * activity, thus no data, and clock ticks are sporadic. The
350 	 * below handles both.
351 	 */
352 
353 	/* total= */
354 	for (s = 0; s < NR_PSI_STATES - 1; s++)
355 		group->total[aggregator][s] +=
356 				div_u64(deltas[s], max(nonidle_total, 1UL));
357 
358 	if (pchanged_states)
359 		*pchanged_states = changed_states;
360 }
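/*
 * [ Editorial illustration, not part of psi.c: the non-idle weighting above
 *   in isolation. Over one period, CPU0 is non-idle for 2000ms with 1000ms
 *   of SOME stall, CPU1 is non-idle for only 200ms, all of it stalled. The
 *   weighted aggregate is dominated by the busier CPU. ]
 */
#include <stdio.h>

int main(void)
{
	const unsigned long some_ms[]    = { 1000, 200 };	/* per-CPU stall */
	const unsigned long nonidle_ms[] = { 2000, 200 };	/* per-CPU weight */
	unsigned long weighted = 0, nonidle_total = 0;

	for (int cpu = 0; cpu < 2; cpu++) {
		weighted += some_ms[cpu] * nonidle_ms[cpu];
		nonidle_total += nonidle_ms[cpu];
	}
	/* ~927ms of SOME, versus the 1200ms a plain sum would claim */
	printf("aggregate SOME: %lu ms\n", weighted / nonidle_total);
	return 0;
}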
361 
362 static u64 update_averages(struct psi_group *group, u64 now)
363 {
364 	unsigned long missed_periods = 0;
365 	u64 expires, period;
366 	u64 avg_next_update;
367 	int s;
368 
369 	/* avgX= */
370 	expires = group->avg_next_update;
371 	if (now - expires >= psi_period)
372 		missed_periods = div_u64(now - expires, psi_period);
373 
374 	/*
375 	 * The periodic clock tick can get delayed for various
376 	 * reasons, especially on loaded systems. To avoid clock
377 	 * drift, we schedule the clock in fixed psi_period intervals.
378 	 * But the deltas we sample out of the per-cpu buckets above
379 	 * are based on the actual time elapsing between clock ticks.
380 	 */
381 	avg_next_update = expires + ((1 + missed_periods) * psi_period);
382 	period = now - (group->avg_last_update + (missed_periods * psi_period));
383 	group->avg_last_update = now;
384 
385 	for (s = 0; s < NR_PSI_STATES - 1; s++) {
386 		u32 sample;
387 
388 		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
389 		/*
390 		 * Due to the lockless sampling of the time buckets,
391 		 * recorded time deltas can slip into the next period,
392 		 * which under full pressure can result in samples in
393 		 * excess of the period length.
394 		 *
395 		 * We don't want to report nonsensical pressures in
396 		 * excess of 100%, nor do we want to drop such events
397 		 * on the floor. Instead we punt any overage into the
398 		 * future until pressure subsides. By doing this we
399 		 * don't underreport the occurring pressure curve, we
400 		 * just report it delayed by one period length.
401 		 *
402 		 * The error isn't cumulative. As soon as another
403 		 * delta slips from a period P to P+1, by definition
404 		 * it frees up its time T in P.
405 		 */
406 		if (sample > period)
407 			sample = period;
408 		group->avg_total[s] += sample;
409 		calc_avgs(group->avg[s], missed_periods, sample, period);
410 	}
411 
412 	return avg_next_update;
413 }
414 
415 static void psi_avgs_work(struct work_struct *work)
416 {
417 	struct delayed_work *dwork;
418 	struct psi_group *group;
419 	u32 changed_states;
420 	bool nonidle;
421 	u64 now;
422 
423 	dwork = to_delayed_work(work);
424 	group = container_of(dwork, struct psi_group, avgs_work);
425 
426 	mutex_lock(&group->avgs_lock);
427 
428 	now = sched_clock();
429 
430 	collect_percpu_times(group, PSI_AVGS, &changed_states);
431 	nonidle = changed_states & (1 << PSI_NONIDLE);
432 	/*
433 	 * If there is task activity, periodically fold the per-cpu
434 	 * times and feed samples into the running averages. If things
435 	 * are idle and there is no data to process, stop the clock.
436 	 * Once restarted, we'll catch up the running averages in one
437 	 * go - see calc_avgs() and missed_periods.
438 	 */
439 	if (now >= group->avg_next_update)
440 		group->avg_next_update = update_averages(group, now);
441 
442 	if (nonidle) {
443 		schedule_delayed_work(dwork, nsecs_to_jiffies(
444 				group->avg_next_update - now) + 1);
445 	}
446 
447 	mutex_unlock(&group->avgs_lock);
448 }
449 
450 /* Trigger tracking window manipulations */
451 static void window_reset(struct psi_window *win, u64 now, u64 value,
452 			 u64 prev_growth)
453 {
454 	win->start_time = now;
455 	win->start_value = value;
456 	win->prev_growth = prev_growth;
457 }
458 
459 /*
460  * PSI growth tracking window update and growth calculation routine.
461  *
462  * This approximates a sliding tracking window by interpolating
463  * partially elapsed windows using historical growth data from the
464  * previous intervals. This minimizes memory requirements (by not storing
465  * all the intermediate values in the previous window) and simplifies
466  * the calculations. It works well because the PSI signal changes only
467  * in the positive direction and, over relatively small window sizes,
468  * the growth is close to linear.
469  */
470 static u64 window_update(struct psi_window *win, u64 now, u64 value)
471 {
472 	u64 elapsed;
473 	u64 growth;
474 
475 	elapsed = now - win->start_time;
476 	growth = value - win->start_value;
477 	/*
478 	 * After each tracking window passes win->start_value and
479 	 * win->start_time get reset and win->prev_growth stores
480 	 * the average per-window growth of the previous window.
481 	 * win->prev_growth is then used to interpolate additional
482 	 * growth from the previous window assuming it was linear.
483 	 */
484 	if (elapsed > win->size)
485 		window_reset(win, now, value, growth);
486 	else {
487 		u32 remaining;
488 
489 		remaining = win->size - elapsed;
490 		growth += div64_u64(win->prev_growth * remaining, win->size);
491 	}
492 
493 	return growth;
494 }
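/*
 * [ Editorial illustration, not part of psi.c: the interpolation above with
 *   concrete numbers. For a 1s window whose previous window saw 300ms of
 *   growth, being 400ms into the current window with 100ms of new growth
 *   reports 100ms + 300ms * 600/1000 = 280ms. ]
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t win_growth(uint64_t win_size, uint64_t prev_growth,
			   uint64_t elapsed, uint64_t new_growth)
{
	if (elapsed > win_size)
		return new_growth;	/* window fully passed: no carry-over */
	/* add the linearly extrapolated share of the previous window */
	return new_growth + prev_growth * (win_size - elapsed) / win_size;
}

int main(void)
{
	/* nanoseconds: 1s window, 300ms previous growth, 400ms in, 100ms new */
	printf("%llu ns\n", (unsigned long long)
	       win_growth(1000000000ULL, 300000000ULL,
			  400000000ULL, 100000000ULL));	/* 280000000 ns */
	return 0;
}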
495 
496 static void init_triggers(struct psi_group *group, u64 now)
497 {
498 	struct psi_trigger *t;
499 
500 	list_for_each_entry(t, &group->triggers, node)
501 		window_reset(&t->win, now,
502 				group->total[PSI_POLL][t->state], 0);
503 	memcpy(group->polling_total, group->total[PSI_POLL],
504 		   sizeof(group->polling_total));
505 	group->polling_next_update = now + group->poll_min_period;
506 }
507 
508 static u64 update_triggers(struct psi_group *group, u64 now)
509 {
510 	struct psi_trigger *t;
511 	bool update_total = false;
512 	u64 *total = group->total[PSI_POLL];
513 
514 	/*
515 	 * On subsequent updates, calculate growth deltas and let
516 	 * watchers know when their specified thresholds are exceeded.
517 	 */
518 	list_for_each_entry(t, &group->triggers, node) {
519 		u64 growth;
520 		bool new_stall;
521 
522 		new_stall = group->polling_total[t->state] != total[t->state];
523 
524 		/* Check for stall activity or a previous threshold breach */
525 		if (!new_stall && !t->pending_event)
526 			continue;
527 		/*
528 		 * Check for new stall activity, as well as deferred
529 		 * events that occurred in the last window after the
530 		 * trigger had already fired (we want to ratelimit
531 		 * events without dropping any).
532 		 */
533 		if (new_stall) {
534 			/*
535 			 * Multiple triggers might be looking at the same state,
536 			 * remember to update group->polling_total[] once we've
537 			 * been through all of them. Also remember to extend the
538 			 * polling time if we see new stall activity.
539 			 */
540 			update_total = true;
541 
542 			/* Calculate growth since last update */
543 			growth = window_update(&t->win, now, total[t->state]);
544 			if (growth < t->threshold)
545 				continue;
546 
547 			t->pending_event = true;
548 		}
549 		/* Limit event signaling to once per window */
550 		if (now < t->last_event_time + t->win.size)
551 			continue;
552 
553 		/* Generate an event */
554 		if (cmpxchg(&t->event, 0, 1) == 0)
555 			wake_up_interruptible(&t->event_wait);
556 		t->last_event_time = now;
557 		/* Reset threshold breach flag once event got generated */
558 		t->pending_event = false;
559 	}
560 
561 	if (update_total)
562 		memcpy(group->polling_total, total,
563 				sizeof(group->polling_total));
564 
565 	return now + group->poll_min_period;
566 }
567 
568 /* Schedule polling if it's not already scheduled. */
569 static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
570 {
571 	struct task_struct *task;
572 
573 	/*
574 	 * Do not reschedule if already scheduled.
575 	 * Possible race with a timer scheduled after this check but before
576 	 * mod_timer below can be tolerated because group->polling_next_update
577 	 * will keep updates on schedule.
578 	 */
579 	if (timer_pending(&group->poll_timer))
580 		return;
581 
582 	rcu_read_lock();
583 
584 	task = rcu_dereference(group->poll_task);
585 	/*
586 	 * The poll task might be NULL in case psi_trigger_destroy races with
587 	 * psi_task_change (a hot path), which can't use locks.
588 	 */
589 	if (likely(task))
590 		mod_timer(&group->poll_timer, jiffies + delay);
591 
592 	rcu_read_unlock();
593 }
594 
595 static void psi_poll_work(struct psi_group *group)
596 {
597 	u32 changed_states;
598 	u64 now;
599 
600 	mutex_lock(&group->trigger_lock);
601 
602 	now = sched_clock();
603 
604 	collect_percpu_times(group, PSI_POLL, &changed_states);
605 
606 	if (changed_states & group->poll_states) {
607 		/* Initialize trigger windows when entering polling mode */
608 		if (now > group->polling_until)
609 			init_triggers(group, now);
610 
611 		/*
612 		 * Keep the monitor active for at least the duration of the
613 		 * minimum tracking window as long as monitor states are
614 		 * changing.
615 		 */
616 		group->polling_until = now +
617 			group->poll_min_period * UPDATES_PER_WINDOW;
618 	}
619 
620 	if (now > group->polling_until) {
621 		group->polling_next_update = ULLONG_MAX;
622 		goto out;
623 	}
624 
625 	if (now >= group->polling_next_update)
626 		group->polling_next_update = update_triggers(group, now);
627 
628 	psi_schedule_poll_work(group,
629 		nsecs_to_jiffies(group->polling_next_update - now) + 1);
630 
631 out:
632 	mutex_unlock(&group->trigger_lock);
633 }
634 
635 static int psi_poll_worker(void *data)
636 {
637 	struct psi_group *group = (struct psi_group *)data;
638 
639 	sched_set_fifo_low(current);
640 
641 	while (true) {
642 		wait_event_interruptible(group->poll_wait,
643 				atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
644 				kthread_should_stop());
645 		if (kthread_should_stop())
646 			break;
647 
648 		psi_poll_work(group);
649 	}
650 	return 0;
651 }
652 
653 static void poll_timer_fn(struct timer_list *t)
654 {
655 	struct psi_group *group = from_timer(group, t, poll_timer);
656 
657 	atomic_set(&group->poll_wakeup, 1);
658 	wake_up_interruptible(&group->poll_wait);
659 }
660 
661 static void record_times(struct psi_group_cpu *groupc, u64 now)
662 {
663 	u32 delta;
664 
665 	delta = now - groupc->state_start;
666 	groupc->state_start = now;
667 
668 	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
669 		groupc->times[PSI_IO_SOME] += delta;
670 		if (groupc->state_mask & (1 << PSI_IO_FULL))
671 			groupc->times[PSI_IO_FULL] += delta;
672 	}
673 
674 	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
675 		groupc->times[PSI_MEM_SOME] += delta;
676 		if (groupc->state_mask & (1 << PSI_MEM_FULL))
677 			groupc->times[PSI_MEM_FULL] += delta;
678 	}
679 
680 	if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
681 		groupc->times[PSI_CPU_SOME] += delta;
682 		if (groupc->state_mask & (1 << PSI_CPU_FULL))
683 			groupc->times[PSI_CPU_FULL] += delta;
684 	}
685 
686 	if (groupc->state_mask & (1 << PSI_NONIDLE))
687 		groupc->times[PSI_NONIDLE] += delta;
688 }
689 
690 static void psi_group_change(struct psi_group *group, int cpu,
691 			     unsigned int clear, unsigned int set, u64 now,
692 			     bool wake_clock)
693 {
694 	struct psi_group_cpu *groupc;
695 	u32 state_mask = 0;
696 	unsigned int t, m;
697 	enum psi_states s;
698 
699 	groupc = per_cpu_ptr(group->pcpu, cpu);
700 
701 	/*
702 	 * First we assess the aggregate resource states this CPU's
703 	 * tasks have been in since the last change, and account any
704 	 * SOME and FULL time these may have resulted in.
705 	 *
706 	 * Then we update the task counts according to the state
707 	 * change requested through the @clear and @set bits.
708 	 */
709 	write_seqcount_begin(&groupc->seq);
710 
711 	record_times(groupc, now);
712 
713 	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
714 		if (!(m & (1 << t)))
715 			continue;
716 		if (groupc->tasks[t]) {
717 			groupc->tasks[t]--;
718 		} else if (!psi_bug) {
719 			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u %u] clear=%x set=%x\n",
720 					cpu, t, groupc->tasks[0],
721 					groupc->tasks[1], groupc->tasks[2],
722 					groupc->tasks[3], groupc->tasks[4],
723 					clear, set);
724 			psi_bug = 1;
725 		}
726 	}
727 
728 	for (t = 0; set; set &= ~(1 << t), t++)
729 		if (set & (1 << t))
730 			groupc->tasks[t]++;
731 
732 	/* Calculate state mask representing active states */
733 	for (s = 0; s < NR_PSI_STATES; s++) {
734 		if (test_state(groupc->tasks, s))
735 			state_mask |= (1 << s);
736 	}
737 
738 	/*
739 	 * Since we care about lost potential, a memstall is FULL
740 	 * when there are no other working tasks, but also when
741 	 * the CPU is actively reclaiming and nothing productive
742 	 * could run even if it were runnable. So when the current
743 	 * task in a cgroup is in_memstall, the corresponding groupc
744 	 * on that cpu is in PSI_MEM_FULL state.
745 	 */
746 	if (unlikely(groupc->tasks[NR_ONCPU] && cpu_curr(cpu)->in_memstall))
747 		state_mask |= (1 << PSI_MEM_FULL);
748 
749 	groupc->state_mask = state_mask;
750 
751 	write_seqcount_end(&groupc->seq);
752 
753 	if (state_mask & group->poll_states)
754 		psi_schedule_poll_work(group, 1);
755 
756 	if (wake_clock && !delayed_work_pending(&group->avgs_work))
757 		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
758 }
759 
760 static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
761 {
762 	if (*iter == &psi_system)
763 		return NULL;
764 
765 #ifdef CONFIG_CGROUPS
766 	if (static_branch_likely(&psi_cgroups_enabled)) {
767 		struct cgroup *cgroup = NULL;
768 
769 		if (!*iter)
770 			cgroup = task->cgroups->dfl_cgrp;
771 		else
772 			cgroup = cgroup_parent(*iter);
773 
774 		if (cgroup && cgroup_parent(cgroup)) {
775 			*iter = cgroup;
776 			return cgroup_psi(cgroup);
777 		}
778 	}
779 #endif
780 	*iter = &psi_system;
781 	return &psi_system;
782 }
783 
784 static void psi_flags_change(struct task_struct *task, int clear, int set)
785 {
786 	if (((task->psi_flags & set) ||
787 	     (task->psi_flags & clear) != clear) &&
788 	    !psi_bug) {
789 		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
790 				task->pid, task->comm, task_cpu(task),
791 				task->psi_flags, clear, set);
792 		psi_bug = 1;
793 	}
794 
795 	task->psi_flags &= ~clear;
796 	task->psi_flags |= set;
797 }
798 
799 void psi_task_change(struct task_struct *task, int clear, int set)
800 {
801 	int cpu = task_cpu(task);
802 	struct psi_group *group;
803 	bool wake_clock = true;
804 	void *iter = NULL;
805 	u64 now;
806 
807 	if (!task->pid)
808 		return;
809 
810 	psi_flags_change(task, clear, set);
811 
812 	now = cpu_clock(cpu);
813 	/*
814 	 * Periodic aggregation shuts off if there is a period of no
815 	 * task changes, so we wake it back up if necessary. However,
816 	 * don't do this if the task change is the aggregation worker
817 	 * itself going to sleep, or we'll ping-pong forever.
818 	 */
819 	if (unlikely((clear & TSK_RUNNING) &&
820 		     (task->flags & PF_WQ_WORKER) &&
821 		     wq_worker_last_func(task) == psi_avgs_work))
822 		wake_clock = false;
823 
824 	while ((group = iterate_groups(task, &iter)))
825 		psi_group_change(group, cpu, clear, set, now, wake_clock);
826 }
827 
828 void psi_task_switch(struct task_struct *prev, struct task_struct *next,
829 		     bool sleep)
830 {
831 	struct psi_group *group, *common = NULL;
832 	int cpu = task_cpu(prev);
833 	void *iter;
834 	u64 now = cpu_clock(cpu);
835 
836 	if (next->pid) {
837 		bool identical_state;
838 
839 		psi_flags_change(next, 0, TSK_ONCPU);
840 		/*
841 		 * When switching between tasks that have an identical
842 		 * runtime state, the groups that contain both tasks (the first
843 		 * common ancestor and up) see no state change. Iterate @next's
844 		 * ancestors only until we encounter @prev's ONCPU.
845 		 */
846 		identical_state = prev->psi_flags == next->psi_flags;
847 		iter = NULL;
848 		while ((group = iterate_groups(next, &iter))) {
849 			if (identical_state &&
850 			    per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
851 				common = group;
852 				break;
853 			}
854 
855 			psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
856 		}
857 	}
858 
859 	if (prev->pid) {
860 		int clear = TSK_ONCPU, set = 0;
861 
862 		/*
863 		 * When we're going to sleep, psi_dequeue() lets us
864 		 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
865 		 * TSK_IOWAIT here, where we can combine it with
866 		 * TSK_ONCPU and save walking common ancestors twice.
867 		 */
868 		if (sleep) {
869 			clear |= TSK_RUNNING;
870 			if (prev->in_memstall)
871 				clear |= TSK_MEMSTALL_RUNNING;
872 			if (prev->in_iowait)
873 				set |= TSK_IOWAIT;
874 		}
875 
876 		psi_flags_change(prev, clear, set);
877 
878 		iter = NULL;
879 		while ((group = iterate_groups(prev, &iter)) && group != common)
880 			psi_group_change(group, cpu, clear, set, now, true);
881 
882 		/*
883 		 * TSK_ONCPU is handled up to the common ancestor. If we're tasked
884 		 * with dequeuing too, finish that for the rest of the hierarchy.
885 		 */
886 		if (sleep) {
887 			clear &= ~TSK_ONCPU;
888 			for (; group; group = iterate_groups(prev, &iter))
889 				psi_group_change(group, cpu, clear, set, now, true);
890 		}
891 	}
892 }
893 
894 /**
895  * psi_memstall_enter - mark the beginning of a memory stall section
896  * @flags: flags to handle nested sections
897  *
898  * Marks the calling task as being stalled due to a lack of memory,
899  * such as waiting for a refault or performing reclaim.
900  */
901 void psi_memstall_enter(unsigned long *flags)
902 {
903 	struct rq_flags rf;
904 	struct rq *rq;
905 
906 	if (static_branch_likely(&psi_disabled))
907 		return;
908 
909 	*flags = current->in_memstall;
910 	if (*flags)
911 		return;
912 	/*
913 	 * in_memstall setting & accounting needs to be atomic wrt
914 	 * changes to the task's scheduling state, otherwise we can
915 	 * race with CPU migration.
916 	 */
917 	rq = this_rq_lock_irq(&rf);
918 
919 	current->in_memstall = 1;
920 	psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
921 
922 	rq_unlock_irq(rq, &rf);
923 }
924 
925 /**
926  * psi_memstall_leave - mark the end of a memory stall section
927  * @flags: flags to handle nested memory stall sections
928  *
929  * Marks the calling task as no longer stalled due to lack of memory.
930  */
931 void psi_memstall_leave(unsigned long *flags)
932 {
933 	struct rq_flags rf;
934 	struct rq *rq;
935 
936 	if (static_branch_likely(&psi_disabled))
937 		return;
938 
939 	if (*flags)
940 		return;
941 	/*
942 	 * in_memstall clearing & accounting needs to be atomic wrt
943 	 * changes to the task's scheduling state, otherwise we could
944 	 * race with CPU migration.
945 	 */
946 	rq = this_rq_lock_irq(&rf);
947 
948 	current->in_memstall = 0;
949 	psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);
950 
951 	rq_unlock_irq(rq, &rf);
952 }
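/*
 * [ Editorial illustration, not part of psi.c: the usage pattern for the
 *   pair above, as found in reclaim and refault paths in mm/. The stalled
 *   work itself (do_reclaim_work) is a hypothetical stand-in. ]
 */
static void example_memstall_section(void)
{
	unsigned long pflags;

	psi_memstall_enter(&pflags);
	do_reclaim_work();		/* hypothetical memory-stalled work */
	psi_memstall_leave(&pflags);
}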
953 
954 #ifdef CONFIG_CGROUPS
955 int psi_cgroup_alloc(struct cgroup *cgroup)
956 {
957 	if (static_branch_likely(&psi_disabled))
958 		return 0;
959 
960 	cgroup->psi = kmalloc(sizeof(struct psi_group), GFP_KERNEL);
961 	if (!cgroup->psi)
962 		return -ENOMEM;
963 
964 	cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
965 	if (!cgroup->psi->pcpu) {
966 		kfree(cgroup->psi);
967 		return -ENOMEM;
968 	}
969 	group_init(cgroup->psi);
970 	return 0;
971 }
972 
973 void psi_cgroup_free(struct cgroup *cgroup)
974 {
975 	if (static_branch_likely(&psi_disabled))
976 		return;
977 
978 	cancel_delayed_work_sync(&cgroup->psi->avgs_work);
979 	free_percpu(cgroup->psi->pcpu);
980 	/* All triggers must be removed by now */
981 	WARN_ONCE(cgroup->psi->poll_states, "psi: trigger leak\n");
982 	kfree(cgroup->psi);
983 }
984 
985 /**
986  * cgroup_move_task - move task to a different cgroup
987  * @task: the task
988  * @to: the target css_set
989  *
990  * Move task to a new cgroup and safely migrate its associated stall
991  * state between the different groups.
992  *
993  * This function acquires the task's rq lock to lock out concurrent
994  * changes to the task's scheduling state and - in case the task is
995  * running - concurrent changes to its stall state.
996  */
997 void cgroup_move_task(struct task_struct *task, struct css_set *to)
998 {
999 	unsigned int task_flags;
1000 	struct rq_flags rf;
1001 	struct rq *rq;
1002 
1003 	if (static_branch_likely(&psi_disabled)) {
1004 		/*
1005 		 * Lame to do this here, but the scheduler cannot be locked
1006 		 * from the outside, so we move cgroups from inside sched/.
1007 		 */
1008 		rcu_assign_pointer(task->cgroups, to);
1009 		return;
1010 	}
1011 
1012 	rq = task_rq_lock(task, &rf);
1013 
1014 	/*
1015 	 * We may race with schedule() dropping the rq lock between
1016 	 * deactivating prev and switching to next. Because the psi
1017 	 * updates from the deactivation are deferred to the switch
1018 	 * callback to save cgroup tree updates, the task's scheduling
1019 	 * state here is not coherent with its psi state:
1020 	 *
1021 	 * schedule()                   cgroup_move_task()
1022 	 *   rq_lock()
1023 	 *   deactivate_task()
1024 	 *     p->on_rq = 0
1025 	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
1026 	 *   pick_next_task()
1027 	 *     rq_unlock()
1028 	 *                                rq_lock()
1029 	 *                                psi_task_change() // old cgroup
1030 	 *                                task->cgroups = to
1031 	 *                                psi_task_change() // new cgroup
1032 	 *                                rq_unlock()
1033 	 *     rq_lock()
1034 	 *   psi_sched_switch() // does deferred updates in new cgroup
1035 	 *
1036 	 * Don't rely on the scheduling state. Use psi_flags instead.
1037 	 */
1038 	task_flags = task->psi_flags;
1039 
1040 	if (task_flags)
1041 		psi_task_change(task, task_flags, 0);
1042 
1043 	/* See comment above */
1044 	rcu_assign_pointer(task->cgroups, to);
1045 
1046 	if (task_flags)
1047 		psi_task_change(task, 0, task_flags);
1048 
1049 	task_rq_unlock(rq, task, &rf);
1050 }
1051 #endif /* CONFIG_CGROUPS */
1052 
1053 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
1054 {
1055 	int full;
1056 	u64 now;
1057 
1058 	if (static_branch_likely(&psi_disabled))
1059 		return -EOPNOTSUPP;
1060 
1061 	/* Update averages before reporting them */
1062 	mutex_lock(&group->avgs_lock);
1063 	now = sched_clock();
1064 	collect_percpu_times(group, PSI_AVGS, NULL);
1065 	if (now >= group->avg_next_update)
1066 		group->avg_next_update = update_averages(group, now);
1067 	mutex_unlock(&group->avgs_lock);
1068 
1069 	for (full = 0; full < 2; full++) {
1070 		unsigned long avg[3] = { 0, };
1071 		u64 total = 0;
1072 		int w;
1073 
1074 		/* CPU FULL is undefined at the system level */
1075 		if (!(group == &psi_system && res == PSI_CPU && full)) {
1076 			for (w = 0; w < 3; w++)
1077 				avg[w] = group->avg[res * 2 + full][w];
1078 			total = div_u64(group->total[PSI_AVGS][res * 2 + full],
1079 					NSEC_PER_USEC);
1080 		}
1081 
1082 		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
1083 			   full ? "full" : "some",
1084 			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
1085 			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
1086 			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
1087 			   total);
1088 	}
1089 
1090 	return 0;
1091 }
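/*
 * [ Editorial note, not part of psi.c: with the format string above, reading
 *   e.g. /proc/pressure/memory produces two lines of this shape, averages in
 *   percent and total in microseconds (the numbers below are made up):
 *
 *	some avg10=0.32 avg60=0.12 avg300=0.02 total=1468362
 *	full avg10=0.00 avg60=0.00 avg300=0.00 total=201293
 * ]
 */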
1092 
1093 struct psi_trigger *psi_trigger_create(struct psi_group *group,
1094 			char *buf, size_t nbytes, enum psi_res res)
1095 {
1096 	struct psi_trigger *t;
1097 	enum psi_states state;
1098 	u32 threshold_us;
1099 	u32 window_us;
1100 
1101 	if (static_branch_likely(&psi_disabled))
1102 		return ERR_PTR(-EOPNOTSUPP);
1103 
1104 	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
1105 		state = PSI_IO_SOME + res * 2;
1106 	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
1107 		state = PSI_IO_FULL + res * 2;
1108 	else
1109 		return ERR_PTR(-EINVAL);
1110 
1111 	if (state >= PSI_NONIDLE)
1112 		return ERR_PTR(-EINVAL);
1113 
1114 	if (window_us < WINDOW_MIN_US ||
1115 		window_us > WINDOW_MAX_US)
1116 		return ERR_PTR(-EINVAL);
1117 
1118 	/* Check threshold */
1119 	if (threshold_us == 0 || threshold_us > window_us)
1120 		return ERR_PTR(-EINVAL);
1121 
1122 	t = kmalloc(sizeof(*t), GFP_KERNEL);
1123 	if (!t)
1124 		return ERR_PTR(-ENOMEM);
1125 
1126 	t->group = group;
1127 	t->state = state;
1128 	t->threshold = threshold_us * NSEC_PER_USEC;
1129 	t->win.size = window_us * NSEC_PER_USEC;
1130 	window_reset(&t->win, sched_clock(),
1131 			group->total[PSI_POLL][t->state], 0);
1132 
1133 	t->event = 0;
1134 	t->last_event_time = 0;
1135 	init_waitqueue_head(&t->event_wait);
1136 	t->pending_event = false;
1137 
1138 	mutex_lock(&group->trigger_lock);
1139 
1140 	if (!rcu_access_pointer(group->poll_task)) {
1141 		struct task_struct *task;
1142 
1143 		task = kthread_create(psi_poll_worker, group, "psimon");
1144 		if (IS_ERR(task)) {
1145 			kfree(t);
1146 			mutex_unlock(&group->trigger_lock);
1147 			return ERR_CAST(task);
1148 		}
1149 		atomic_set(&group->poll_wakeup, 0);
1150 		wake_up_process(task);
1151 		rcu_assign_pointer(group->poll_task, task);
1152 	}
1153 
1154 	list_add(&t->node, &group->triggers);
1155 	group->poll_min_period = min(group->poll_min_period,
1156 		div_u64(t->win.size, UPDATES_PER_WINDOW));
1157 	group->nr_triggers[t->state]++;
1158 	group->poll_states |= (1 << t->state);
1159 
1160 	mutex_unlock(&group->trigger_lock);
1161 
1162 	return t;
1163 }
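/*
 * [ Editorial illustration, not part of psi.c: a minimal userspace consumer
 *   of the trigger interface above, along the lines of the example in
 *   Documentation/accounting/psi.rst. It arms a trigger for 150ms of memory
 *   SOME stall per 1s window and then waits for POLLPRI events. ]
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char trig[] = "some 150000 1000000";	/* 150ms per 1s window */
	struct pollfd fds;

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0 || write(fds.fd, trig, strlen(trig) + 1) < 0) {
		perror("arming trigger");
		return 1;
	}
	fds.events = POLLPRI;

	while (poll(&fds, 1, -1) >= 0) {
		if (fds.revents & POLLERR)
			break;		/* e.g. the monitored group went away */
		if (fds.revents & POLLPRI)
			printf("memory pressure event\n");
	}
	return 0;
}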
1164 
1165 void psi_trigger_destroy(struct psi_trigger *t)
1166 {
1167 	struct psi_group *group;
1168 	struct task_struct *task_to_destroy = NULL;
1169 
1170 	/*
1171 	 * We do not check psi_disabled since it might have been disabled after
1172 	 * the trigger got created.
1173 	 */
1174 	if (!t)
1175 		return;
1176 
1177 	group = t->group;
1178 	/*
1179 	 * Wakeup waiters to stop polling. Can happen if cgroup is deleted
1180 	 * Wake up waiters to stop polling. This can happen if the cgroup is
1181 	 * deleted from under a polling process.
1182 	wake_up_interruptible(&t->event_wait);
1183 
1184 	mutex_lock(&group->trigger_lock);
1185 
1186 	if (!list_empty(&t->node)) {
1187 		struct psi_trigger *tmp;
1188 		u64 period = ULLONG_MAX;
1189 
1190 		list_del(&t->node);
1191 		group->nr_triggers[t->state]--;
1192 		if (!group->nr_triggers[t->state])
1193 			group->poll_states &= ~(1 << t->state);
1194 		/* reset min update period for the remaining triggers */
1195 		list_for_each_entry(tmp, &group->triggers, node)
1196 			period = min(period, div_u64(tmp->win.size,
1197 					UPDATES_PER_WINDOW));
1198 		group->poll_min_period = period;
1199 		/* Destroy poll_task when the last trigger is destroyed */
1200 		if (group->poll_states == 0) {
1201 			group->polling_until = 0;
1202 			task_to_destroy = rcu_dereference_protected(
1203 					group->poll_task,
1204 					lockdep_is_held(&group->trigger_lock));
1205 			rcu_assign_pointer(group->poll_task, NULL);
1206 			del_timer(&group->poll_timer);
1207 		}
1208 	}
1209 
1210 	mutex_unlock(&group->trigger_lock);
1211 
1212 	/*
1213 	 * Wait for psi_schedule_poll_work RCU to complete its read-side
1214 	 * critical section before destroying the trigger and optionally the
1215 	 * poll_task.
1216 	 */
1217 	synchronize_rcu();
1218 	/*
1219 	 * Stop kthread 'psimon' after releasing trigger_lock to prevent a
1220 	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
1221 	 */
1222 	if (task_to_destroy) {
1223 		/*
1224 		 * After the RCU grace period has expired, the worker
1225 		 * can no longer be found through group->poll_task.
1226 		 */
1227 		kthread_stop(task_to_destroy);
1228 	}
1229 	kfree(t);
1230 }
1231 
1232 __poll_t psi_trigger_poll(void **trigger_ptr,
1233 				struct file *file, poll_table *wait)
1234 {
1235 	__poll_t ret = DEFAULT_POLLMASK;
1236 	struct psi_trigger *t;
1237 
1238 	if (static_branch_likely(&psi_disabled))
1239 		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1240 
1241 	t = smp_load_acquire(trigger_ptr);
1242 	if (!t)
1243 		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1244 
1245 	poll_wait(file, &t->event_wait, wait);
1246 
1247 	if (cmpxchg(&t->event, 1, 0) == 1)
1248 		ret |= EPOLLPRI;
1249 
1250 	return ret;
1251 }
1252 
1253 #ifdef CONFIG_PROC_FS
1254 static int psi_io_show(struct seq_file *m, void *v)
1255 {
1256 	return psi_show(m, &psi_system, PSI_IO);
1257 }
1258 
1259 static int psi_memory_show(struct seq_file *m, void *v)
1260 {
1261 	return psi_show(m, &psi_system, PSI_MEM);
1262 }
1263 
1264 static int psi_cpu_show(struct seq_file *m, void *v)
1265 {
1266 	return psi_show(m, &psi_system, PSI_CPU);
1267 }
1268 
1269 static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
1270 {
1271 	if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
1272 		return -EPERM;
1273 
1274 	return single_open(file, psi_show, NULL);
1275 }
1276 
1277 static int psi_io_open(struct inode *inode, struct file *file)
1278 {
1279 	return psi_open(file, psi_io_show);
1280 }
1281 
1282 static int psi_memory_open(struct inode *inode, struct file *file)
1283 {
1284 	return psi_open(file, psi_memory_show);
1285 }
1286 
1287 static int psi_cpu_open(struct inode *inode, struct file *file)
1288 {
1289 	return psi_open(file, psi_cpu_show);
1290 }
1291 
1292 static ssize_t psi_write(struct file *file, const char __user *user_buf,
1293 			 size_t nbytes, enum psi_res res)
1294 {
1295 	char buf[32];
1296 	size_t buf_size;
1297 	struct seq_file *seq;
1298 	struct psi_trigger *new;
1299 
1300 	if (static_branch_likely(&psi_disabled))
1301 		return -EOPNOTSUPP;
1302 
1303 	if (!nbytes)
1304 		return -EINVAL;
1305 
1306 	buf_size = min(nbytes, sizeof(buf));
1307 	if (copy_from_user(buf, user_buf, buf_size))
1308 		return -EFAULT;
1309 
1310 	buf[buf_size - 1] = '\0';
1311 
1312 	seq = file->private_data;
1313 
1314 	/* Take seq->lock to protect seq->private from concurrent writes */
1315 	mutex_lock(&seq->lock);
1316 
1317 	/* Allow only one trigger per file descriptor */
1318 	if (seq->private) {
1319 		mutex_unlock(&seq->lock);
1320 		return -EBUSY;
1321 	}
1322 
1323 	new = psi_trigger_create(&psi_system, buf, nbytes, res);
1324 	if (IS_ERR(new)) {
1325 		mutex_unlock(&seq->lock);
1326 		return PTR_ERR(new);
1327 	}
1328 
1329 	smp_store_release(&seq->private, new);
1330 	mutex_unlock(&seq->lock);
1331 
1332 	return nbytes;
1333 }
1334 
1335 static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
1336 			    size_t nbytes, loff_t *ppos)
1337 {
1338 	return psi_write(file, user_buf, nbytes, PSI_IO);
1339 }
1340 
1341 static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
1342 				size_t nbytes, loff_t *ppos)
1343 {
1344 	return psi_write(file, user_buf, nbytes, PSI_MEM);
1345 }
1346 
1347 static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
1348 			     size_t nbytes, loff_t *ppos)
1349 {
1350 	return psi_write(file, user_buf, nbytes, PSI_CPU);
1351 }
1352 
1353 static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
1354 {
1355 	struct seq_file *seq = file->private_data;
1356 
1357 	return psi_trigger_poll(&seq->private, file, wait);
1358 }
1359 
1360 static int psi_fop_release(struct inode *inode, struct file *file)
1361 {
1362 	struct seq_file *seq = file->private_data;
1363 
1364 	psi_trigger_destroy(seq->private);
1365 	return single_release(inode, file);
1366 }
1367 
1368 static const struct proc_ops psi_io_proc_ops = {
1369 	.proc_open	= psi_io_open,
1370 	.proc_read	= seq_read,
1371 	.proc_lseek	= seq_lseek,
1372 	.proc_write	= psi_io_write,
1373 	.proc_poll	= psi_fop_poll,
1374 	.proc_release	= psi_fop_release,
1375 };
1376 
1377 static const struct proc_ops psi_memory_proc_ops = {
1378 	.proc_open	= psi_memory_open,
1379 	.proc_read	= seq_read,
1380 	.proc_lseek	= seq_lseek,
1381 	.proc_write	= psi_memory_write,
1382 	.proc_poll	= psi_fop_poll,
1383 	.proc_release	= psi_fop_release,
1384 };
1385 
1386 static const struct proc_ops psi_cpu_proc_ops = {
1387 	.proc_open	= psi_cpu_open,
1388 	.proc_read	= seq_read,
1389 	.proc_lseek	= seq_lseek,
1390 	.proc_write	= psi_cpu_write,
1391 	.proc_poll	= psi_fop_poll,
1392 	.proc_release	= psi_fop_release,
1393 };
1394 
1395 static int __init psi_proc_init(void)
1396 {
1397 	if (psi_enable) {
1398 		proc_mkdir("pressure", NULL);
1399 		proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
1400 		proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
1401 		proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
1402 	}
1403 	return 0;
1404 }
1405 module_init(psi_proc_init);
1406 
1407 #endif /* CONFIG_PROC_FS */
1408