xref: /linux/kernel/sched/psi.c (revision 497e6b37b0099dc415578488287fd84fb74433eb)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Pressure stall information for CPU, memory and IO
4  *
5  * Copyright (c) 2018 Facebook, Inc.
6  * Author: Johannes Weiner <hannes@cmpxchg.org>
7  *
8  * Polling support by Suren Baghdasaryan <surenb@google.com>
9  * Copyright (c) 2018 Google, Inc.
10  *
11  * When CPU, memory and IO are contended, tasks experience delays that
12  * reduce throughput and introduce latencies into the workload. Memory
13  * and IO contention, in addition, can cause a full loss of forward
14  * progress in which the CPU goes idle.
15  *
16  * This code aggregates individual task delays into resource pressure
17  * metrics that indicate problems with both workload health and
18  * resource utilization.
19  *
20  *			Model
21  *
22  * The time in which a task can execute on a CPU is our baseline for
23  * productivity. Pressure expresses the amount of time in which this
24  * potential cannot be realized due to resource contention.
25  *
26  * This concept of productivity has two components: the workload and
27  * the CPU. To measure the impact of pressure on both, we define two
28  * contention states for a resource: SOME and FULL.
29  *
30  * In the SOME state of a given resource, one or more tasks are
31  * delayed on that resource. This affects the workload's ability to
32  * perform work, but the CPU may still be executing other tasks.
33  *
34  * In the FULL state of a given resource, all non-idle tasks are
35  * delayed on that resource such that nobody is advancing and the CPU
36  * goes idle. This leaves both workload and CPU unproductive.
37  *
38  *	SOME = nr_delayed_tasks != 0
39  *	FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
40  *
41  * What it means for a task to be productive is defined differently
42  * for each resource. For IO, productive means a running task. For
43  * memory, productive means a running task that isn't a reclaimer. For
44  * CPU, productive means an oncpu task.
45  *
46  * Naturally, the FULL state doesn't exist for the CPU resource at the
47  * system level, but exists at the cgroup level. At the cgroup level,
48  * FULL means all non-idle tasks in the cgroup are delayed on the CPU
49  * resource which is being used by others outside of the cgroup or
50  * throttled by the cgroup cpu.max configuration.
51  *
52  * The percentage of wallclock time spent in those compound stall
53  * states gives pressure numbers between 0 and 100 for each resource,
54  * where the SOME percentage indicates workload slowdowns and the FULL
55  * percentage indicates reduced CPU utilization:
56  *
57  *	%SOME = time(SOME) / period
58  *	%FULL = time(FULL) / period
59  *
60  *			Multiple CPUs
61  *
62  * The more tasks and available CPUs there are, the more work can be
63  * performed concurrently. This means that the potential that can go
64  * unrealized due to resource contention *also* scales with non-idle
65  * tasks and CPUs.
66  *
67  * Consider a scenario where 257 number crunching tasks are trying to
68  * run concurrently on 256 CPUs. If we simply aggregated the task
69  * states, we would have to conclude a CPU SOME pressure number of
70  * 100%, since *somebody* is waiting on a runqueue at all
71  * times. However, that is clearly not the amount of contention the
72  * workload is experiencing: only one out of 256 possible execution
73  * threads will be contended at any given time, or about 0.4%.
74  *
75  * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
76  * given time *one* of the tasks is delayed due to a lack of memory.
77  * Again, looking purely at the task state would yield a memory FULL
78  * pressure number of 0%, since *somebody* is always making forward
79  * progress. But again this wouldn't capture the amount of execution
80  * potential lost, which is 1 out of 4 CPUs, or 25%.
81  *
82  * To calculate wasted potential (pressure) with multiple processors,
83  * we have to base our calculation on the number of non-idle tasks in
84  * conjunction with the number of available CPUs, which is the number
85  * of potential execution threads. SOME becomes then the proportion of
86  * delayed tasks to possible threads, and FULL is the share of possible
87  * threads that are unproductive due to delays:
88  *
89  *	threads = min(nr_nonidle_tasks, nr_cpus)
90  *	   SOME = min(nr_delayed_tasks / threads, 1)
91  *	   FULL = (threads - min(nr_productive_tasks, threads)) / threads
92  *
93  * For the 257 number crunchers on 256 CPUs, this yields:
94  *
95  *	threads = min(257, 256)
96  *	   SOME = min(1 / 256, 1)             = 0.4%
97  *	   FULL = (256 - min(256, 256)) / 256 = 0%
98  *
99  * For the 1 out of 4 memory-delayed tasks, this yields:
100  *
101  *	threads = min(4, 4)
102  *	   SOME = min(1 / 4, 1)               = 25%
103  *	   FULL = (4 - min(3, 4)) / 4         = 25%
104  *
105  * [ Substitute nr_cpus with 1, and you can see that it's a natural
106  *   extension of the single-CPU model. ]
107  *
108  *			Implementation
109  *
110  * To assess the precise time spent in each such state, we would have
111  * to freeze the system on task changes and start/stop the state
112  * clocks accordingly. Obviously that doesn't scale in practice.
113  *
114  * Because the scheduler aims to distribute the compute load evenly
115  * among the available CPUs, we can track task state locally to each
116  * CPU and, at much lower frequency, extrapolate the global state for
117  * the cumulative stall times and the running averages.
118  *
119  * For each runqueue, we track:
120  *
121  *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
122  *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
123  *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
124  *
125  * and then periodically aggregate:
126  *
127  *	tNONIDLE = sum(tNONIDLE[i])
128  *
129  *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
130  *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
131  *
132  *	   %SOME = tSOME / period
133  *	   %FULL = tFULL / period
134  *
135  * This gives us an approximation of pressure that is practical
136  * cost-wise, yet way more sensitive and accurate than periodic
137  * sampling of the aggregate task states would be.
138  */
139 
140 static int psi_bug __read_mostly;
141 
142 DEFINE_STATIC_KEY_FALSE(psi_disabled);
143 DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
144 
145 #ifdef CONFIG_PSI_DEFAULT_DISABLED
146 static bool psi_enable;
147 #else
148 static bool psi_enable = true;
149 #endif
150 static int __init setup_psi(char *str)
151 {
152 	return kstrtobool(str, &psi_enable) == 0;
153 }
154 __setup("psi=", setup_psi);
155 
156 /* Running averages - we need to be higher-res than loadavg */
157 #define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
158 #define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
159 #define EXP_60s		1981		/* 1/exp(2s/60s) */
160 #define EXP_300s	2034		/* 1/exp(2s/300s) */
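/*
 * These constants are e^(-2s/T) in the FIXED_1 (1 << 11 == 2048)
 * fixed-point base used by the loadavg code, e.g.
 * exp(-2/10) * 2048 ~= 1677, exp(-2/60) * 2048 ~= 1981 and
 * exp(-2/300) * 2048 ~= 2034.
 */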
161 
162 /* PSI trigger definitions */
163 #define WINDOW_MIN_US 500000	/* Min window size is 500ms */
164 #define WINDOW_MAX_US 10000000	/* Max window size is 10s */
165 #define UPDATES_PER_WINDOW 10	/* 10 updates per window */
166 
167 /* Sampling period in nanoseconds */
168 static u64 psi_period __read_mostly;
169 
170 /* System-level pressure and stall tracking */
171 static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
172 struct psi_group psi_system = {
173 	.pcpu = &system_group_pcpu,
174 };
175 
176 static void psi_avgs_work(struct work_struct *work);
177 
178 static void poll_timer_fn(struct timer_list *t);
179 
180 static void group_init(struct psi_group *group)
181 {
182 	int cpu;
183 
184 	group->enabled = true;
185 	for_each_possible_cpu(cpu)
186 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
187 	group->avg_last_update = sched_clock();
188 	group->avg_next_update = group->avg_last_update + psi_period;
189 	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
190 	mutex_init(&group->avgs_lock);
191 	/* Init trigger-related members */
192 	atomic_set(&group->poll_scheduled, 0);
193 	mutex_init(&group->trigger_lock);
194 	INIT_LIST_HEAD(&group->triggers);
195 	group->poll_min_period = U32_MAX;
196 	group->polling_next_update = ULLONG_MAX;
197 	init_waitqueue_head(&group->poll_wait);
198 	timer_setup(&group->poll_timer, poll_timer_fn, 0);
199 	rcu_assign_pointer(group->poll_task, NULL);
200 }
201 
202 void __init psi_init(void)
203 {
204 	if (!psi_enable) {
205 		static_branch_enable(&psi_disabled);
206 		static_branch_disable(&psi_cgroups_enabled);
207 		return;
208 	}
209 
210 	if (!cgroup_psi_enabled())
211 		static_branch_disable(&psi_cgroups_enabled);
212 
213 	psi_period = jiffies_to_nsecs(PSI_FREQ);
214 	group_init(&psi_system);
215 }
216 
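/*
 * Evaluate whether a pressure state currently applies, based on the
 * group's per-CPU task counts and whether one of its tasks is oncpu.
 * These checks implement the SOME/FULL definitions from the comment
 * block at the top of this file.
 */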
217 static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)
218 {
219 	switch (state) {
220 	case PSI_IO_SOME:
221 		return unlikely(tasks[NR_IOWAIT]);
222 	case PSI_IO_FULL:
223 		return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
224 	case PSI_MEM_SOME:
225 		return unlikely(tasks[NR_MEMSTALL]);
226 	case PSI_MEM_FULL:
227 		return unlikely(tasks[NR_MEMSTALL] &&
228 			tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
229 	case PSI_CPU_SOME:
230 		return unlikely(tasks[NR_RUNNING] > oncpu);
231 	case PSI_CPU_FULL:
232 		return unlikely(tasks[NR_RUNNING] && !oncpu);
233 	case PSI_NONIDLE:
234 		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
235 			tasks[NR_RUNNING];
236 	default:
237 		return false;
238 	}
239 }
240 
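/*
 * Snapshot the group's state on @cpu and convert it into per-state
 * time deltas since the previous snapshot for this aggregator. The
 * deltas are returned in @times; @pchanged_states gets a bit set for
 * each state whose time advanced (plus PSI_STATE_RESCHEDULE when the
 * avgs_work needs to be re-armed).
 */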
241 static void get_recent_times(struct psi_group *group, int cpu,
242 			     enum psi_aggregators aggregator, u32 *times,
243 			     u32 *pchanged_states)
244 {
245 	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
246 	int current_cpu = raw_smp_processor_id();
247 	unsigned int tasks[NR_PSI_TASK_COUNTS];
248 	u64 now, state_start;
249 	enum psi_states s;
250 	unsigned int seq;
251 	u32 state_mask;
252 
253 	*pchanged_states = 0;
254 
255 	/* Snapshot a coherent view of the CPU state */
256 	do {
257 		seq = read_seqcount_begin(&groupc->seq);
258 		now = cpu_clock(cpu);
259 		memcpy(times, groupc->times, sizeof(groupc->times));
260 		state_mask = groupc->state_mask;
261 		state_start = groupc->state_start;
262 		if (cpu == current_cpu)
263 			memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
264 	} while (read_seqcount_retry(&groupc->seq, seq));
265 
266 	/* Calculate state time deltas against the previous snapshot */
267 	for (s = 0; s < NR_PSI_STATES; s++) {
268 		u32 delta;
269 		/*
270 		 * In addition to already concluded states, we also
271 		 * incorporate currently active states on the CPU,
272 		 * since states may last for many sampling periods.
273 		 *
274 		 * This way we keep our delta sampling buckets small
275 		 * (u32) and our reported pressure close to what's
276 		 * actually happening.
277 		 */
278 		if (state_mask & (1 << s))
279 			times[s] += now - state_start;
280 
281 		delta = times[s] - groupc->times_prev[aggregator][s];
282 		groupc->times_prev[aggregator][s] = times[s];
283 
284 		times[s] = delta;
285 		if (delta)
286 			*pchanged_states |= (1 << s);
287 	}
288 
289 	/*
290 	 * When collect_percpu_times() is called from the avgs_work, we don't
291 	 * want to re-arm avgs_work when all CPUs are IDLE. But the current CPU
292 	 * running this avgs_work is never IDLE, because avgs_work can't be
293 	 * shut off. So for the current CPU, we need to re-arm avgs_work only
294 	 * when (NR_RUNNING > 1 || NR_IOWAIT > 0 || NR_MEMSTALL > 0); for other
295 	 * CPUs we can just check the PSI_NONIDLE delta.
296 	 */
297 	if (current_work() == &group->avgs_work.work) {
298 		bool reschedule;
299 
300 		if (cpu == current_cpu)
301 			reschedule = tasks[NR_RUNNING] +
302 				     tasks[NR_IOWAIT] +
303 				     tasks[NR_MEMSTALL] > 1;
304 		else
305 			reschedule = *pchanged_states & (1 << PSI_NONIDLE);
306 
307 		if (reschedule)
308 			*pchanged_states |= PSI_STATE_RESCHEDULE;
309 	}
310 }
311 
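/*
 * Fold one sample into the 10s/60s/300s running averages. calc_load()
 * from the loadavg code computes roughly
 * avg = (avg * e + pct * (FIXED_1 - e)) / FIXED_1, i.e. an
 * exponentially decaying average with the given time constant; missed
 * periods are folded in first as zero-pressure samples.
 */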
312 static void calc_avgs(unsigned long avg[3], int missed_periods,
313 		      u64 time, u64 period)
314 {
315 	unsigned long pct;
316 
317 	/* Fill in zeroes for periods of no activity */
318 	if (missed_periods) {
319 		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
320 		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
321 		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
322 	}
323 
324 	/* Sample the most recent active period */
325 	pct = div_u64(time * 100, period);
326 	pct *= FIXED_1;
327 	avg[0] = calc_load(avg[0], EXP_10s, pct);
328 	avg[1] = calc_load(avg[1], EXP_60s, pct);
329 	avg[2] = calc_load(avg[2], EXP_300s, pct);
330 }
331 
332 static void collect_percpu_times(struct psi_group *group,
333 				 enum psi_aggregators aggregator,
334 				 u32 *pchanged_states)
335 {
336 	u64 deltas[NR_PSI_STATES - 1] = { 0, };
337 	unsigned long nonidle_total = 0;
338 	u32 changed_states = 0;
339 	int cpu;
340 	int s;
341 
342 	/*
343 	 * Collect the per-cpu time buckets and average them into a
344 	 * single time sample that is normalized to wallclock time.
345 	 *
346 	 * For averaging, each CPU is weighted by its non-idle time in
347 	 * the sampling period. This eliminates artifacts from uneven
348 	 * loading, or even entirely idle CPUs.
349 	 */
350 	for_each_possible_cpu(cpu) {
351 		u32 times[NR_PSI_STATES];
352 		u32 nonidle;
353 		u32 cpu_changed_states;
354 
355 		get_recent_times(group, cpu, aggregator, times,
356 				&cpu_changed_states);
357 		changed_states |= cpu_changed_states;
358 
359 		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
360 		nonidle_total += nonidle;
361 
362 		for (s = 0; s < PSI_NONIDLE; s++)
363 			deltas[s] += (u64)times[s] * nonidle;
364 	}
365 
366 	/*
367 	 * Integrate the sample into the running statistics that are
368 	 * reported to userspace: the cumulative stall times and the
369 	 * decaying averages.
370 	 *
371 	 * Pressure percentages are sampled at PSI_FREQ. We might be
372 	 * called more often when the user polls more frequently than
373 	 * that; we might be called less often when there is no task
374 	 * activity, thus no data, and clock ticks are sporadic. The
375 	 * below handles both.
376 	 */
377 
378 	/* total= */
379 	for (s = 0; s < NR_PSI_STATES - 1; s++)
380 		group->total[aggregator][s] +=
381 				div_u64(deltas[s], max(nonidle_total, 1UL));
382 
383 	if (pchanged_states)
384 		*pchanged_states = changed_states;
385 }
386 
387 static u64 update_averages(struct psi_group *group, u64 now)
388 {
389 	unsigned long missed_periods = 0;
390 	u64 expires, period;
391 	u64 avg_next_update;
392 	int s;
393 
394 	/* avgX= */
395 	expires = group->avg_next_update;
396 	if (now - expires >= psi_period)
397 		missed_periods = div_u64(now - expires, psi_period);
398 
399 	/*
400 	 * The periodic clock tick can get delayed for various
401 	 * reasons, especially on loaded systems. To avoid clock
402 	 * drift, we schedule the clock in fixed psi_period intervals.
403 	 * But the deltas we sample out of the per-cpu buckets above
404 	 * are based on the actual time elapsing between clock ticks.
405 	 */
406 	avg_next_update = expires + ((1 + missed_periods) * psi_period);
407 	period = now - (group->avg_last_update + (missed_periods * psi_period));
408 	group->avg_last_update = now;
409 
410 	for (s = 0; s < NR_PSI_STATES - 1; s++) {
411 		u32 sample;
412 
413 		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
414 		/*
415 		 * Due to the lockless sampling of the time buckets,
416 		 * recorded time deltas can slip into the next period,
417 		 * which under full pressure can result in samples in
418 		 * excess of the period length.
419 		 *
420 		 * We don't want to report nonsensical pressures in
421 		 * excess of 100%, nor do we want to drop such events
422 		 * on the floor. Instead we punt any overage into the
423 		 * future until pressure subsides. By doing this we
424 		 * don't underreport the occurring pressure curve, we
425 		 * just report it delayed by one period length.
426 		 *
427 		 * The error isn't cumulative. As soon as another
428 		 * delta slips from a period P to P+1, by definition
429 		 * it frees up its time T in P.
430 		 */
431 		if (sample > period)
432 			sample = period;
433 		group->avg_total[s] += sample;
434 		calc_avgs(group->avg[s], missed_periods, sample, period);
435 	}
436 
437 	return avg_next_update;
438 }
439 
440 static void psi_avgs_work(struct work_struct *work)
441 {
442 	struct delayed_work *dwork;
443 	struct psi_group *group;
444 	u32 changed_states;
445 	u64 now;
446 
447 	dwork = to_delayed_work(work);
448 	group = container_of(dwork, struct psi_group, avgs_work);
449 
450 	mutex_lock(&group->avgs_lock);
451 
452 	now = sched_clock();
453 
454 	collect_percpu_times(group, PSI_AVGS, &changed_states);
455 	/*
456 	 * If there is task activity, periodically fold the per-cpu
457 	 * times and feed samples into the running averages. If things
458 	 * are idle and there is no data to process, stop the clock.
459 	 * Once restarted, we'll catch up the running averages in one
460 	 * go - see calc_avgs() and missed_periods.
461 	 */
462 	if (now >= group->avg_next_update)
463 		group->avg_next_update = update_averages(group, now);
464 
465 	if (changed_states & PSI_STATE_RESCHEDULE) {
466 		schedule_delayed_work(dwork, nsecs_to_jiffies(
467 				group->avg_next_update - now) + 1);
468 	}
469 
470 	mutex_unlock(&group->avgs_lock);
471 }
472 
473 /* Trigger tracking window manipulations */
474 static void window_reset(struct psi_window *win, u64 now, u64 value,
475 			 u64 prev_growth)
476 {
477 	win->start_time = now;
478 	win->start_value = value;
479 	win->prev_growth = prev_growth;
480 }
481 
482 /*
483  * PSI growth tracking window update and growth calculation routine.
484  *
485  * This approximates a sliding tracking window by interpolating
486  * partially elapsed windows using historical growth data from the
487  * previous intervals. This minimizes memory requirements (by not storing
488  * all the intermediate values in the previous window) and simplifies
489  * the calculations. It works well because the PSI signal changes only in
490  * the positive direction, and over relatively small window sizes the growth
491  * is close to linear.
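 *
 * For example (illustrative numbers only): with a 1s window of which
 * 600ms have elapsed, the reported growth is the growth observed in
 * those 600ms plus win->prev_growth scaled by the remaining 400ms/1s.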
492  */
493 static u64 window_update(struct psi_window *win, u64 now, u64 value)
494 {
495 	u64 elapsed;
496 	u64 growth;
497 
498 	elapsed = now - win->start_time;
499 	growth = value - win->start_value;
500 	/*
501 	 * After each tracking window passes, win->start_value and
502 	 * win->start_time get reset and win->prev_growth stores
503 	 * the average per-window growth of the previous window.
504 	 * win->prev_growth is then used to interpolate additional
505 	 * growth from the previous window assuming it was linear.
506 	 */
507 	if (elapsed > win->size)
508 		window_reset(win, now, value, growth);
509 	else {
510 		u32 remaining;
511 
512 		remaining = win->size - elapsed;
513 		growth += div64_u64(win->prev_growth * remaining, win->size);
514 	}
515 
516 	return growth;
517 }
518 
519 static void init_triggers(struct psi_group *group, u64 now)
520 {
521 	struct psi_trigger *t;
522 
523 	list_for_each_entry(t, &group->triggers, node)
524 		window_reset(&t->win, now,
525 				group->total[PSI_POLL][t->state], 0);
526 	memcpy(group->polling_total, group->total[PSI_POLL],
527 		   sizeof(group->polling_total));
528 	group->polling_next_update = now + group->poll_min_period;
529 }
530 
531 static u64 update_triggers(struct psi_group *group, u64 now)
532 {
533 	struct psi_trigger *t;
534 	bool update_total = false;
535 	u64 *total = group->total[PSI_POLL];
536 
537 	/*
538 	 * On subsequent updates, calculate growth deltas and let
539 	 * watchers know when their specified thresholds are exceeded.
540 	 */
541 	list_for_each_entry(t, &group->triggers, node) {
542 		u64 growth;
543 		bool new_stall;
544 
545 		new_stall = group->polling_total[t->state] != total[t->state];
546 
547 		/* Check for stall activity or a previous threshold breach */
548 		if (!new_stall && !t->pending_event)
549 			continue;
550 		/*
551 		 * Check for new stall activity, as well as deferred
552 		 * events that occurred in the last window after the
553 		 * trigger had already fired (we want to ratelimit
554 		 * events without dropping any).
555 		 */
556 		if (new_stall) {
557 			/*
558 			 * Multiple triggers might be looking at the same state;
559 			 * remember to update group->polling_total[] once we've
560 			 * been through all of them. Also remember to extend the
561 			 * polling time if we see new stall activity.
562 			 */
563 			update_total = true;
564 
565 			/* Calculate growth since last update */
566 			growth = window_update(&t->win, now, total[t->state]);
567 			if (!t->pending_event) {
568 				if (growth < t->threshold)
569 					continue;
570 
571 				t->pending_event = true;
572 			}
573 		}
574 		/* Limit event signaling to once per window */
575 		if (now < t->last_event_time + t->win.size)
576 			continue;
577 
578 		/* Generate an event */
579 		if (cmpxchg(&t->event, 0, 1) == 0)
580 			wake_up_interruptible(&t->event_wait);
581 		t->last_event_time = now;
582 		/* Reset threshold breach flag once event got generated */
583 		t->pending_event = false;
584 	}
585 
586 	if (update_total)
587 		memcpy(group->polling_total, total,
588 				sizeof(group->polling_total));
589 
590 	return now + group->poll_min_period;
591 }
592 
593 /* Schedule polling if it's not already scheduled or forced. */
594 static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
595 				   bool force)
596 {
597 	struct task_struct *task;
598 
599 	/*
600 	 * atomic_xchg should be called even when !force to provide a
601 	 * full memory barrier (see the comment inside psi_poll_work).
602 	 */
603 	if (atomic_xchg(&group->poll_scheduled, 1) && !force)
604 		return;
605 
606 	rcu_read_lock();
607 
608 	task = rcu_dereference(group->poll_task);
609 	/*
610 	 * The kworker might be NULL if psi_trigger_destroy() races with
611 	 * psi_task_change() (the hotpath), which can't use locks.
612 	 */
613 	if (likely(task))
614 		mod_timer(&group->poll_timer, jiffies + delay);
615 	else
616 		atomic_set(&group->poll_scheduled, 0);
617 
618 	rcu_read_unlock();
619 }
620 
621 static void psi_poll_work(struct psi_group *group)
622 {
623 	bool force_reschedule = false;
624 	u32 changed_states;
625 	u64 now;
626 
627 	mutex_lock(&group->trigger_lock);
628 
629 	now = sched_clock();
630 
631 	if (now > group->polling_until) {
632 		/*
633 		 * We are either about to start or might stop polling if no
634 		 * state change was recorded. Resetting poll_scheduled leaves
635 		 * a small window for psi_group_change to sneak in and schedule
636 		 * an immediate poll_work before we get to rescheduling. One
637 		 * potential extra wakeup at the end of the polling window
638 		 * should be negligible and polling_next_update still keeps
639 		 * updates correctly on schedule.
640 		 */
641 		atomic_set(&group->poll_scheduled, 0);
642 		/*
643 		 * A task change can race with the poll worker that is supposed to
644 		 * report on it. To avoid missing events, ensure ordering between
645 		 * poll_scheduled and the task state accesses, such that if the poll
646 		 * worker misses the state update, the task change is guaranteed to
647 		 * reschedule the poll worker:
648 		 *
649 		 * poll worker:
650 		 *   atomic_set(poll_scheduled, 0)
651 		 *   smp_mb()
652 		 *   LOAD states
653 		 *
654 		 * task change:
655 		 *   STORE states
656 		 *   if atomic_xchg(poll_scheduled, 1) == 0:
657 		 *     schedule poll worker
658 		 *
659 		 * The atomic_xchg() implies a full barrier.
660 		 */
661 		smp_mb();
662 	} else {
663 		/* Polling window is not over, keep rescheduling */
664 		force_reschedule = true;
665 	}
666 
668 	collect_percpu_times(group, PSI_POLL, &changed_states);
669 
670 	if (changed_states & group->poll_states) {
671 		/* Initialize trigger windows when entering polling mode */
672 		if (now > group->polling_until)
673 			init_triggers(group, now);
674 
675 		/*
676 		 * Keep the monitor active for at least the duration of the
677 		 * minimum tracking window as long as monitor states are
678 		 * changing.
679 		 */
680 		group->polling_until = now +
681 			group->poll_min_period * UPDATES_PER_WINDOW;
682 	}
683 
684 	if (now > group->polling_until) {
685 		group->polling_next_update = ULLONG_MAX;
686 		goto out;
687 	}
688 
689 	if (now >= group->polling_next_update)
690 		group->polling_next_update = update_triggers(group, now);
691 
692 	psi_schedule_poll_work(group,
693 		nsecs_to_jiffies(group->polling_next_update - now) + 1,
694 		force_reschedule);
695 
696 out:
697 	mutex_unlock(&group->trigger_lock);
698 }
699 
700 static int psi_poll_worker(void *data)
701 {
702 	struct psi_group *group = (struct psi_group *)data;
703 
704 	sched_set_fifo_low(current);
705 
706 	while (true) {
707 		wait_event_interruptible(group->poll_wait,
708 				atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
709 				kthread_should_stop());
710 		if (kthread_should_stop())
711 			break;
712 
713 		psi_poll_work(group);
714 	}
715 	return 0;
716 }
717 
718 static void poll_timer_fn(struct timer_list *t)
719 {
720 	struct psi_group *group = from_timer(group, t, poll_timer);
721 
722 	atomic_set(&group->poll_wakeup, 1);
723 	wake_up_interruptible(&group->poll_wait);
724 }
725 
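/*
 * Charge the time since the last state change to every pressure state
 * currently set in groupc->state_mask, and restart the state clock.
 */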
726 static void record_times(struct psi_group_cpu *groupc, u64 now)
727 {
728 	u32 delta;
729 
730 	delta = now - groupc->state_start;
731 	groupc->state_start = now;
732 
733 	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
734 		groupc->times[PSI_IO_SOME] += delta;
735 		if (groupc->state_mask & (1 << PSI_IO_FULL))
736 			groupc->times[PSI_IO_FULL] += delta;
737 	}
738 
739 	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
740 		groupc->times[PSI_MEM_SOME] += delta;
741 		if (groupc->state_mask & (1 << PSI_MEM_FULL))
742 			groupc->times[PSI_MEM_FULL] += delta;
743 	}
744 
745 	if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
746 		groupc->times[PSI_CPU_SOME] += delta;
747 		if (groupc->state_mask & (1 << PSI_CPU_FULL))
748 			groupc->times[PSI_CPU_FULL] += delta;
749 	}
750 
751 	if (groupc->state_mask & (1 << PSI_NONIDLE))
752 		groupc->times[PSI_NONIDLE] += delta;
753 }
754 
755 static void psi_group_change(struct psi_group *group, int cpu,
756 			     unsigned int clear, unsigned int set, u64 now,
757 			     bool wake_clock)
758 {
759 	struct psi_group_cpu *groupc;
760 	unsigned int t, m;
761 	enum psi_states s;
762 	u32 state_mask;
763 
764 	groupc = per_cpu_ptr(group->pcpu, cpu);
765 
766 	/*
767 	 * First we update the task counts according to the state
768 	 * change requested through the @clear and @set bits.
769 	 *
770 	 * Then, if cgroup PSI stats accounting is enabled, we
771 	 * assess the aggregate resource states this CPU's tasks
772 	 * have been in since the last change, and account any
773 	 * SOME and FULL time these may have resulted in.
774 	 */
775 	write_seqcount_begin(&groupc->seq);
776 
777 	/*
778 	 * Start with TSK_ONCPU, which doesn't have a corresponding
779 	 * task count - it's just a boolean flag directly encoded in
780 	 * the state mask. Clear, set, or carry the current state if
781 	 * no changes are requested.
782 	 */
783 	if (unlikely(clear & TSK_ONCPU)) {
784 		state_mask = 0;
785 		clear &= ~TSK_ONCPU;
786 	} else if (unlikely(set & TSK_ONCPU)) {
787 		state_mask = PSI_ONCPU;
788 		set &= ~TSK_ONCPU;
789 	} else {
790 		state_mask = groupc->state_mask & PSI_ONCPU;
791 	}
792 
793 	/*
794 	 * The rest of the state mask is calculated based on the task
795 	 * counts. Update those first, then construct the mask.
796 	 */
797 	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
798 		if (!(m & (1 << t)))
799 			continue;
800 		if (groupc->tasks[t]) {
801 			groupc->tasks[t]--;
802 		} else if (!psi_bug) {
803 			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
804 					cpu, t, groupc->tasks[0],
805 					groupc->tasks[1], groupc->tasks[2],
806 					groupc->tasks[3], clear, set);
807 			psi_bug = 1;
808 		}
809 	}
810 
811 	for (t = 0; set; set &= ~(1 << t), t++)
812 		if (set & (1 << t))
813 			groupc->tasks[t]++;
814 
815 	if (!group->enabled) {
816 		/*
817 		 * On the first group change after disabling PSI, conclude
818 		 * the current state and flush its time. This is unlikely
819 		 * to matter to the user, but aggregation (get_recent_times)
820 		 * may have already incorporated the live state into times_prev;
821 		 * avoid a delta sample underflow when PSI is later re-enabled.
822 		 */
823 		if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
824 			record_times(groupc, now);
825 
826 		groupc->state_mask = state_mask;
827 
828 		write_seqcount_end(&groupc->seq);
829 		return;
830 	}
831 
832 	for (s = 0; s < NR_PSI_STATES; s++) {
833 		if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU))
834 			state_mask |= (1 << s);
835 	}
836 
837 	/*
838 	 * Since we care about lost potential, a memstall is FULL
839 	 * when there are no other working tasks, but also when
840 	 * the CPU is actively reclaiming and nothing productive
841 	 * could run even if it were runnable. So when the current
842 	 * task in a cgroup is in_memstall, the corresponding groupc
843 	 * on that cpu is in PSI_MEM_FULL state.
844 	 */
845 	if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
846 		state_mask |= (1 << PSI_MEM_FULL);
847 
848 	record_times(groupc, now);
849 
850 	groupc->state_mask = state_mask;
851 
852 	write_seqcount_end(&groupc->seq);
853 
854 	if (state_mask & group->poll_states)
855 		psi_schedule_poll_work(group, 1, false);
856 
857 	if (wake_clock && !delayed_work_pending(&group->avgs_work))
858 		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
859 }
860 
861 static inline struct psi_group *task_psi_group(struct task_struct *task)
862 {
863 #ifdef CONFIG_CGROUPS
864 	if (static_branch_likely(&psi_cgroups_enabled))
865 		return cgroup_psi(task_dfl_cgroup(task));
866 #endif
867 	return &psi_system;
868 }
869 
870 static void psi_flags_change(struct task_struct *task, int clear, int set)
871 {
872 	if (((task->psi_flags & set) ||
873 	     (task->psi_flags & clear) != clear) &&
874 	    !psi_bug) {
875 		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
876 				task->pid, task->comm, task_cpu(task),
877 				task->psi_flags, clear, set);
878 		psi_bug = 1;
879 	}
880 
881 	task->psi_flags &= ~clear;
882 	task->psi_flags |= set;
883 }
884 
885 void psi_task_change(struct task_struct *task, int clear, int set)
886 {
887 	int cpu = task_cpu(task);
888 	struct psi_group *group;
889 	u64 now;
890 
891 	if (!task->pid)
892 		return;
893 
894 	psi_flags_change(task, clear, set);
895 
896 	now = cpu_clock(cpu);
897 
898 	group = task_psi_group(task);
899 	do {
900 		psi_group_change(group, cpu, clear, set, now, true);
901 	} while ((group = group->parent));
902 }
903 
904 void psi_task_switch(struct task_struct *prev, struct task_struct *next,
905 		     bool sleep)
906 {
907 	struct psi_group *group, *common = NULL;
908 	int cpu = task_cpu(prev);
909 	u64 now = cpu_clock(cpu);
910 
911 	if (next->pid) {
912 		psi_flags_change(next, 0, TSK_ONCPU);
913 		/*
914 		 * Set TSK_ONCPU on @next's cgroups. If @next shares any
915 		 * ancestors with @prev, those will already have @prev's
916 		 * TSK_ONCPU bit set, and we can stop the iteration there.
917 		 */
918 		group = task_psi_group(next);
919 		do {
920 			if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
921 			    PSI_ONCPU) {
922 				common = group;
923 				break;
924 			}
925 
926 			psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
927 		} while ((group = group->parent));
928 	}
929 
930 	if (prev->pid) {
931 		int clear = TSK_ONCPU, set = 0;
932 		bool wake_clock = true;
933 
934 		/*
935 		 * When we're going to sleep, psi_dequeue() lets us
936 		 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
937 		 * TSK_IOWAIT here, where we can combine it with
938 		 * TSK_ONCPU and save walking common ancestors twice.
939 		 */
940 		if (sleep) {
941 			clear |= TSK_RUNNING;
942 			if (prev->in_memstall)
943 				clear |= TSK_MEMSTALL_RUNNING;
944 			if (prev->in_iowait)
945 				set |= TSK_IOWAIT;
946 
947 			/*
948 			 * Periodic aggregation shuts off if there is a period of no
949 			 * task changes, so we wake it back up if necessary. However,
950 			 * don't do this if the task change is the aggregation worker
951 			 * itself going to sleep, or we'll ping-pong forever.
952 			 */
953 			if (unlikely((prev->flags & PF_WQ_WORKER) &&
954 				     wq_worker_last_func(prev) == psi_avgs_work))
955 				wake_clock = false;
956 		}
957 
958 		psi_flags_change(prev, clear, set);
959 
960 		group = task_psi_group(prev);
961 		do {
962 			if (group == common)
963 				break;
964 			psi_group_change(group, cpu, clear, set, now, wake_clock);
965 		} while ((group = group->parent));
966 
967 		/*
968 		 * TSK_ONCPU is handled up to the common ancestor. If there are
969 		 * any other differences between the two tasks (e.g. prev goes
970 		 * to sleep, or only one task is memstall), finish propagating
971 		 * those differences all the way up to the root.
972 		 */
973 		if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
974 			clear &= ~TSK_ONCPU;
975 			for (; group; group = group->parent)
976 				psi_group_change(group, cpu, clear, set, now, wake_clock);
977 		}
978 	}
979 }
980 
981 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
982 void psi_account_irqtime(struct task_struct *task, u32 delta)
983 {
984 	int cpu = task_cpu(task);
985 	struct psi_group *group;
986 	struct psi_group_cpu *groupc;
987 	u64 now;
988 
989 	if (!task->pid)
990 		return;
991 
992 	now = cpu_clock(cpu);
993 
994 	group = task_psi_group(task);
995 	do {
996 		if (!group->enabled)
997 			continue;
998 
999 		groupc = per_cpu_ptr(group->pcpu, cpu);
1000 
1001 		write_seqcount_begin(&groupc->seq);
1002 
1003 		record_times(groupc, now);
1004 		groupc->times[PSI_IRQ_FULL] += delta;
1005 
1006 		write_seqcount_end(&groupc->seq);
1007 
1008 		if (group->poll_states & (1 << PSI_IRQ_FULL))
1009 			psi_schedule_poll_work(group, 1, false);
1010 	} while ((group = group->parent));
1011 }
1012 #endif
1013 
1014 /**
1015  * psi_memstall_enter - mark the beginning of a memory stall section
1016  * @flags: flags to handle nested sections
1017  *
1018  * Marks the calling task as being stalled due to a lack of memory,
1019  * such as waiting for a refault or performing reclaim.
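 *
 * A typical caller brackets the stalling operation like this
 * (illustrative sketch, not a specific call site):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	... work that may stall on memory, e.g. direct reclaim ...
 *	psi_memstall_leave(&pflags);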
1020  */
1021 void psi_memstall_enter(unsigned long *flags)
1022 {
1023 	struct rq_flags rf;
1024 	struct rq *rq;
1025 
1026 	if (static_branch_likely(&psi_disabled))
1027 		return;
1028 
1029 	*flags = current->in_memstall;
1030 	if (*flags)
1031 		return;
1032 	/*
1033 	 * in_memstall setting & accounting needs to be atomic wrt
1034 	 * changes to the task's scheduling state, otherwise we can
1035 	 * race with CPU migration.
1036 	 */
1037 	rq = this_rq_lock_irq(&rf);
1038 
1039 	current->in_memstall = 1;
1040 	psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
1041 
1042 	rq_unlock_irq(rq, &rf);
1043 }
1044 EXPORT_SYMBOL_GPL(psi_memstall_enter);
1045 
1046 /**
1047  * psi_memstall_leave - mark the end of a memory stall section
1048  * @flags: flags to handle nested memdelay sections
1049  *
1050  * Marks the calling task as no longer stalled due to lack of memory.
1051  */
1052 void psi_memstall_leave(unsigned long *flags)
1053 {
1054 	struct rq_flags rf;
1055 	struct rq *rq;
1056 
1057 	if (static_branch_likely(&psi_disabled))
1058 		return;
1059 
1060 	if (*flags)
1061 		return;
1062 	/*
1063 	 * in_memstall clearing & accounting needs to be atomic wrt
1064 	 * changes to the task's scheduling state, otherwise we could
1065 	 * race with CPU migration.
1066 	 */
1067 	rq = this_rq_lock_irq(&rf);
1068 
1069 	current->in_memstall = 0;
1070 	psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);
1071 
1072 	rq_unlock_irq(rq, &rf);
1073 }
1074 EXPORT_SYMBOL_GPL(psi_memstall_leave);
1075 
1076 #ifdef CONFIG_CGROUPS
1077 int psi_cgroup_alloc(struct cgroup *cgroup)
1078 {
1079 	if (!static_branch_likely(&psi_cgroups_enabled))
1080 		return 0;
1081 
1082 	cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
1083 	if (!cgroup->psi)
1084 		return -ENOMEM;
1085 
1086 	cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
1087 	if (!cgroup->psi->pcpu) {
1088 		kfree(cgroup->psi);
1089 		return -ENOMEM;
1090 	}
1091 	group_init(cgroup->psi);
1092 	cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
1093 	return 0;
1094 }
1095 
1096 void psi_cgroup_free(struct cgroup *cgroup)
1097 {
1098 	if (!static_branch_likely(&psi_cgroups_enabled))
1099 		return;
1100 
1101 	cancel_delayed_work_sync(&cgroup->psi->avgs_work);
1102 	free_percpu(cgroup->psi->pcpu);
1103 	/* All triggers must be removed by now */
1104 	WARN_ONCE(cgroup->psi->poll_states, "psi: trigger leak\n");
1105 	kfree(cgroup->psi);
1106 }
1107 
1108 /**
1109  * cgroup_move_task - move task to a different cgroup
1110  * @task: the task
1111  * @to: the target css_set
1112  *
1113  * Move task to a new cgroup and safely migrate its associated stall
1114  * state between the different groups.
1115  *
1116  * This function acquires the task's rq lock to lock out concurrent
1117  * changes to the task's scheduling state and - in case the task is
1118  * running - concurrent changes to its stall state.
1119  */
1120 void cgroup_move_task(struct task_struct *task, struct css_set *to)
1121 {
1122 	unsigned int task_flags;
1123 	struct rq_flags rf;
1124 	struct rq *rq;
1125 
1126 	if (!static_branch_likely(&psi_cgroups_enabled)) {
1127 		/*
1128 		 * Lame to do this here, but the scheduler cannot be locked
1129 		 * from the outside, so we move cgroups from inside sched/.
1130 		 */
1131 		rcu_assign_pointer(task->cgroups, to);
1132 		return;
1133 	}
1134 
1135 	rq = task_rq_lock(task, &rf);
1136 
1137 	/*
1138 	 * We may race with schedule() dropping the rq lock between
1139 	 * deactivating prev and switching to next. Because the psi
1140 	 * updates from the deactivation are deferred to the switch
1141 	 * callback to save cgroup tree updates, the task's scheduling
1142 	 * state here is not coherent with its psi state:
1143 	 *
1144 	 * schedule()                   cgroup_move_task()
1145 	 *   rq_lock()
1146 	 *   deactivate_task()
1147 	 *     p->on_rq = 0
1148 	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
1149 	 *   pick_next_task()
1150 	 *     rq_unlock()
1151 	 *                                rq_lock()
1152 	 *                                psi_task_change() // old cgroup
1153 	 *                                task->cgroups = to
1154 	 *                                psi_task_change() // new cgroup
1155 	 *                                rq_unlock()
1156 	 *     rq_lock()
1157 	 *   psi_sched_switch() // does deferred updates in new cgroup
1158 	 *
1159 	 * Don't rely on the scheduling state. Use psi_flags instead.
1160 	 */
1161 	task_flags = task->psi_flags;
1162 
1163 	if (task_flags)
1164 		psi_task_change(task, task_flags, 0);
1165 
1166 	/* See comment above */
1167 	rcu_assign_pointer(task->cgroups, to);
1168 
1169 	if (task_flags)
1170 		psi_task_change(task, 0, task_flags);
1171 
1172 	task_rq_unlock(rq, task, &rf);
1173 }
1174 
1175 void psi_cgroup_restart(struct psi_group *group)
1176 {
1177 	int cpu;
1178 
1179 	/*
1180 	 * After we clear psi_group->enabled, we don't actually stop the
1181 	 * percpu task accounting in each psi_group_cpu; we only stop the
1182 	 * test_state() loop, record_times() and the averaging worker,
1183 	 * see psi_group_change() for details.
1184 	 *
1185 	 * When cgroup PSI is disabled, this function has nothing to sync
1186 	 * since the cgroup pressure files are hidden and each percpu
1187 	 * psi_group_cpu sees !psi_group->enabled and only does task accounting.
1188 	 *
1189 	 * When cgroup PSI is re-enabled, this function uses psi_group_change()
1190 	 * to get the correct state mask from the test_state() loop over tasks[],
1191 	 * and restarts groupc->state_start from now; use .clear = .set = 0
1192 	 * here since no task status really changed.
1193 	 */
1194 	if (!group->enabled)
1195 		return;
1196 
1197 	for_each_possible_cpu(cpu) {
1198 		struct rq *rq = cpu_rq(cpu);
1199 		struct rq_flags rf;
1200 		u64 now;
1201 
1202 		rq_lock_irq(rq, &rf);
1203 		now = cpu_clock(cpu);
1204 		psi_group_change(group, cpu, 0, 0, now, true);
1205 		rq_unlock_irq(rq, &rf);
1206 	}
1207 }
1208 #endif /* CONFIG_CGROUPS */
1209 
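/*
 * Print the pressure stats of one resource in the format used by the
 * /proc/pressure/ and cgroup pressure files, e.g. (values are
 * illustrative):
 *
 *	some avg10=2.04 avg60=0.75 avg300=0.40 total=157656722
 *	full avg10=0.00 avg60=0.13 avg300=0.08 total=34514721
 *
 * The system-level CPU "full" line is pinned to zero, and IRQ
 * pressure reports only a "full" line.
 */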
1210 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
1211 {
1212 	bool only_full = false;
1213 	int full;
1214 	u64 now;
1215 
1216 	if (static_branch_likely(&psi_disabled))
1217 		return -EOPNOTSUPP;
1218 
1219 	/* Update averages before reporting them */
1220 	mutex_lock(&group->avgs_lock);
1221 	now = sched_clock();
1222 	collect_percpu_times(group, PSI_AVGS, NULL);
1223 	if (now >= group->avg_next_update)
1224 		group->avg_next_update = update_averages(group, now);
1225 	mutex_unlock(&group->avgs_lock);
1226 
1227 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1228 	only_full = res == PSI_IRQ;
1229 #endif
1230 
1231 	for (full = 0; full < 2 - only_full; full++) {
1232 		unsigned long avg[3] = { 0, };
1233 		u64 total = 0;
1234 		int w;
1235 
1236 		/* CPU FULL is undefined at the system level */
1237 		if (!(group == &psi_system && res == PSI_CPU && full)) {
1238 			for (w = 0; w < 3; w++)
1239 				avg[w] = group->avg[res * 2 + full][w];
1240 			total = div_u64(group->total[PSI_AVGS][res * 2 + full],
1241 					NSEC_PER_USEC);
1242 		}
1243 
1244 		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
1245 			   full || only_full ? "full" : "some",
1246 			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
1247 			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
1248 			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
1249 			   total);
1250 	}
1251 
1252 	return 0;
1253 }
1254 
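/*
 * Set up a trigger from a "some <threshold_us> <window_us>" or
 * "full <threshold_us> <window_us>" string. For example (values are
 * illustrative), "some 150000 1000000" asks to be notified when at
 * least 150ms of SOME stall time accumulates within any 1s window.
 */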
1255 struct psi_trigger *psi_trigger_create(struct psi_group *group,
1256 			char *buf, enum psi_res res)
1257 {
1258 	struct psi_trigger *t;
1259 	enum psi_states state;
1260 	u32 threshold_us;
1261 	u32 window_us;
1262 
1263 	if (static_branch_likely(&psi_disabled))
1264 		return ERR_PTR(-EOPNOTSUPP);
1265 
1266 	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
1267 		state = PSI_IO_SOME + res * 2;
1268 	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
1269 		state = PSI_IO_FULL + res * 2;
1270 	else
1271 		return ERR_PTR(-EINVAL);
1272 
1273 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1274 	if (res == PSI_IRQ && --state != PSI_IRQ_FULL)
1275 		return ERR_PTR(-EINVAL);
1276 #endif
1277 
1278 	if (state >= PSI_NONIDLE)
1279 		return ERR_PTR(-EINVAL);
1280 
1281 	if (window_us < WINDOW_MIN_US ||
1282 		window_us > WINDOW_MAX_US)
1283 		return ERR_PTR(-EINVAL);
1284 
1285 	/* Check threshold */
1286 	if (threshold_us == 0 || threshold_us > window_us)
1287 		return ERR_PTR(-EINVAL);
1288 
1289 	t = kmalloc(sizeof(*t), GFP_KERNEL);
1290 	if (!t)
1291 		return ERR_PTR(-ENOMEM);
1292 
1293 	t->group = group;
1294 	t->state = state;
1295 	t->threshold = threshold_us * NSEC_PER_USEC;
1296 	t->win.size = window_us * NSEC_PER_USEC;
1297 	window_reset(&t->win, sched_clock(),
1298 			group->total[PSI_POLL][t->state], 0);
1299 
1300 	t->event = 0;
1301 	t->last_event_time = 0;
1302 	init_waitqueue_head(&t->event_wait);
1303 	t->pending_event = false;
1304 
1305 	mutex_lock(&group->trigger_lock);
1306 
1307 	if (!rcu_access_pointer(group->poll_task)) {
1308 		struct task_struct *task;
1309 
1310 		task = kthread_create(psi_poll_worker, group, "psimon");
1311 		if (IS_ERR(task)) {
1312 			kfree(t);
1313 			mutex_unlock(&group->trigger_lock);
1314 			return ERR_CAST(task);
1315 		}
1316 		atomic_set(&group->poll_wakeup, 0);
1317 		wake_up_process(task);
1318 		rcu_assign_pointer(group->poll_task, task);
1319 	}
1320 
1321 	list_add(&t->node, &group->triggers);
1322 	group->poll_min_period = min(group->poll_min_period,
1323 		div_u64(t->win.size, UPDATES_PER_WINDOW));
1324 	group->nr_triggers[t->state]++;
1325 	group->poll_states |= (1 << t->state);
1326 
1327 	mutex_unlock(&group->trigger_lock);
1328 
1329 	return t;
1330 }
1331 
1332 void psi_trigger_destroy(struct psi_trigger *t)
1333 {
1334 	struct psi_group *group;
1335 	struct task_struct *task_to_destroy = NULL;
1336 
1337 	/*
1338 	 * We do not check psi_disabled since it might have been disabled after
1339 	 * the trigger got created.
1340 	 */
1341 	if (!t)
1342 		return;
1343 
1344 	group = t->group;
1345 	/*
1346 	 * Wake up waiters to stop polling. This can happen if the cgroup is
1347 	 * deleted from under a polling process.
1348 	 */
1349 	wake_up_interruptible(&t->event_wait);
1350 
1351 	mutex_lock(&group->trigger_lock);
1352 
1353 	if (!list_empty(&t->node)) {
1354 		struct psi_trigger *tmp;
1355 		u64 period = ULLONG_MAX;
1356 
1357 		list_del(&t->node);
1358 		group->nr_triggers[t->state]--;
1359 		if (!group->nr_triggers[t->state])
1360 			group->poll_states &= ~(1 << t->state);
1361 		/* reset min update period for the remaining triggers */
1362 		list_for_each_entry(tmp, &group->triggers, node)
1363 			period = min(period, div_u64(tmp->win.size,
1364 					UPDATES_PER_WINDOW));
1365 		group->poll_min_period = period;
1366 		/* Destroy poll_task when the last trigger is destroyed */
1367 		if (group->poll_states == 0) {
1368 			group->polling_until = 0;
1369 			task_to_destroy = rcu_dereference_protected(
1370 					group->poll_task,
1371 					lockdep_is_held(&group->trigger_lock));
1372 			rcu_assign_pointer(group->poll_task, NULL);
1373 			del_timer(&group->poll_timer);
1374 		}
1375 	}
1376 
1377 	mutex_unlock(&group->trigger_lock);
1378 
1379 	/*
1380 	 * Wait for psi_schedule_poll_work RCU to complete its read-side
1381 	 * critical section before destroying the trigger and optionally the
1382 	 * poll_task.
1383 	 */
1384 	synchronize_rcu();
1385 	/*
1386 	 * Stop kthread 'psimon' after releasing trigger_lock to prevent a
1387 	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
1388 	 */
1389 	if (task_to_destroy) {
1390 		/*
1391 		 * After the RCU grace period has expired, the worker
1392 		 * can no longer be found through group->poll_task.
1393 		 */
1394 		kthread_stop(task_to_destroy);
1395 		atomic_set(&group->poll_scheduled, 0);
1396 	}
1397 	kfree(t);
1398 }
1399 
1400 __poll_t psi_trigger_poll(void **trigger_ptr,
1401 				struct file *file, poll_table *wait)
1402 {
1403 	__poll_t ret = DEFAULT_POLLMASK;
1404 	struct psi_trigger *t;
1405 
1406 	if (static_branch_likely(&psi_disabled))
1407 		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1408 
1409 	t = smp_load_acquire(trigger_ptr);
1410 	if (!t)
1411 		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1412 
1413 	poll_wait(file, &t->event_wait, wait);
1414 
1415 	if (cmpxchg(&t->event, 1, 0) == 1)
1416 		ret |= EPOLLPRI;
1417 
1418 	return ret;
1419 }
1420 
1421 #ifdef CONFIG_PROC_FS
1422 static int psi_io_show(struct seq_file *m, void *v)
1423 {
1424 	return psi_show(m, &psi_system, PSI_IO);
1425 }
1426 
1427 static int psi_memory_show(struct seq_file *m, void *v)
1428 {
1429 	return psi_show(m, &psi_system, PSI_MEM);
1430 }
1431 
1432 static int psi_cpu_show(struct seq_file *m, void *v)
1433 {
1434 	return psi_show(m, &psi_system, PSI_CPU);
1435 }
1436 
1437 static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
1438 {
1439 	if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
1440 		return -EPERM;
1441 
1442 	return single_open(file, psi_show, NULL);
1443 }
1444 
1445 static int psi_io_open(struct inode *inode, struct file *file)
1446 {
1447 	return psi_open(file, psi_io_show);
1448 }
1449 
1450 static int psi_memory_open(struct inode *inode, struct file *file)
1451 {
1452 	return psi_open(file, psi_memory_show);
1453 }
1454 
1455 static int psi_cpu_open(struct inode *inode, struct file *file)
1456 {
1457 	return psi_open(file, psi_cpu_show);
1458 }
1459 
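/*
 * Writing a trigger description such as "some 150000 1000000" (values
 * are illustrative) to one of the /proc/pressure/ files attaches a
 * trigger to that file descriptor; the same fd can then be poll()ed
 * and receives EPOLLPRI when the trigger fires.
 */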
1460 static ssize_t psi_write(struct file *file, const char __user *user_buf,
1461 			 size_t nbytes, enum psi_res res)
1462 {
1463 	char buf[32];
1464 	size_t buf_size;
1465 	struct seq_file *seq;
1466 	struct psi_trigger *new;
1467 
1468 	if (static_branch_likely(&psi_disabled))
1469 		return -EOPNOTSUPP;
1470 
1471 	if (!nbytes)
1472 		return -EINVAL;
1473 
1474 	buf_size = min(nbytes, sizeof(buf));
1475 	if (copy_from_user(buf, user_buf, buf_size))
1476 		return -EFAULT;
1477 
1478 	buf[buf_size - 1] = '\0';
1479 
1480 	seq = file->private_data;
1481 
1482 	/* Take seq->lock to protect seq->private from concurrent writes */
1483 	mutex_lock(&seq->lock);
1484 
1485 	/* Allow only one trigger per file descriptor */
1486 	if (seq->private) {
1487 		mutex_unlock(&seq->lock);
1488 		return -EBUSY;
1489 	}
1490 
1491 	new = psi_trigger_create(&psi_system, buf, res);
1492 	if (IS_ERR(new)) {
1493 		mutex_unlock(&seq->lock);
1494 		return PTR_ERR(new);
1495 	}
1496 
1497 	smp_store_release(&seq->private, new);
1498 	mutex_unlock(&seq->lock);
1499 
1500 	return nbytes;
1501 }
1502 
1503 static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
1504 			    size_t nbytes, loff_t *ppos)
1505 {
1506 	return psi_write(file, user_buf, nbytes, PSI_IO);
1507 }
1508 
1509 static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
1510 				size_t nbytes, loff_t *ppos)
1511 {
1512 	return psi_write(file, user_buf, nbytes, PSI_MEM);
1513 }
1514 
1515 static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
1516 			     size_t nbytes, loff_t *ppos)
1517 {
1518 	return psi_write(file, user_buf, nbytes, PSI_CPU);
1519 }
1520 
1521 static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
1522 {
1523 	struct seq_file *seq = file->private_data;
1524 
1525 	return psi_trigger_poll(&seq->private, file, wait);
1526 }
1527 
1528 static int psi_fop_release(struct inode *inode, struct file *file)
1529 {
1530 	struct seq_file *seq = file->private_data;
1531 
1532 	psi_trigger_destroy(seq->private);
1533 	return single_release(inode, file);
1534 }
1535 
1536 static const struct proc_ops psi_io_proc_ops = {
1537 	.proc_open	= psi_io_open,
1538 	.proc_read	= seq_read,
1539 	.proc_lseek	= seq_lseek,
1540 	.proc_write	= psi_io_write,
1541 	.proc_poll	= psi_fop_poll,
1542 	.proc_release	= psi_fop_release,
1543 };
1544 
1545 static const struct proc_ops psi_memory_proc_ops = {
1546 	.proc_open	= psi_memory_open,
1547 	.proc_read	= seq_read,
1548 	.proc_lseek	= seq_lseek,
1549 	.proc_write	= psi_memory_write,
1550 	.proc_poll	= psi_fop_poll,
1551 	.proc_release	= psi_fop_release,
1552 };
1553 
1554 static const struct proc_ops psi_cpu_proc_ops = {
1555 	.proc_open	= psi_cpu_open,
1556 	.proc_read	= seq_read,
1557 	.proc_lseek	= seq_lseek,
1558 	.proc_write	= psi_cpu_write,
1559 	.proc_poll	= psi_fop_poll,
1560 	.proc_release	= psi_fop_release,
1561 };
1562 
1563 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1564 static int psi_irq_show(struct seq_file *m, void *v)
1565 {
1566 	return psi_show(m, &psi_system, PSI_IRQ);
1567 }
1568 
1569 static int psi_irq_open(struct inode *inode, struct file *file)
1570 {
1571 	return psi_open(file, psi_irq_show);
1572 }
1573 
1574 static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
1575 			     size_t nbytes, loff_t *ppos)
1576 {
1577 	return psi_write(file, user_buf, nbytes, PSI_IRQ);
1578 }
1579 
1580 static const struct proc_ops psi_irq_proc_ops = {
1581 	.proc_open	= psi_irq_open,
1582 	.proc_read	= seq_read,
1583 	.proc_lseek	= seq_lseek,
1584 	.proc_write	= psi_irq_write,
1585 	.proc_poll	= psi_fop_poll,
1586 	.proc_release	= psi_fop_release,
1587 };
1588 #endif
1589 
1590 static int __init psi_proc_init(void)
1591 {
1592 	if (psi_enable) {
1593 		proc_mkdir("pressure", NULL);
1594 		proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
1595 		proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
1596 		proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
1597 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1598 		proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops);
1599 #endif
1600 	}
1601 	return 0;
1602 }
1603 module_init(psi_proc_init);
1604 
1605 #endif /* CONFIG_PROC_FS */
1606