xref: /linux/kernel/sched/stats.h (revision 79d2e1919a2728ef49d938eb20ebd5903c14dfb0)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_STATS_H
#define _KERNEL_STATS_H

#ifdef CONFIG_SCHEDSTATS

extern struct static_key_false sched_schedstats;

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

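/*
 * Expects runqueue lock to be held for atomicity of update
 */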
static inline void
rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
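
/*
 * schedstat_*() only update when schedstats is runtime-enabled (the
 * sched_schedstats static branch); the __schedstat_*() variants update
 * unconditionally and are meant for callers that have already checked
 * schedstat_enabled().  A typical call site looks like
 * schedstat_inc(p->stats.nr_wakeups) (illustrative example, not taken
 * from this file).
 */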
#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats);

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats);
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats);

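/*
 * Warn (once) when schedstat-dependent tracepoints are enabled but
 * schedstats itself is disabled.
 */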
static inline void
check_schedstat_required(void)
{
	if (schedstat_enabled())
		return;

	/* Warn once if a dependent tracepoint is active while schedstats is off */
	if (trace_sched_stat_wait_enabled()    ||
	    trace_sched_stat_sleep_enabled()   ||
	    trace_sched_stat_iowait_enabled()  ||
	    trace_sched_stat_blocked_enabled() ||
	    trace_sched_stat_runtime_enabled())
		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
}

#else /* !CONFIG_SCHEDSTATS: */

static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define   schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define   schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define   schedstat_set(var, val)	do { } while (0)
# define   schedstat_val(var)		0
# define   schedstat_val_or_zero(var)	0

# define __update_stats_wait_start(rq, p, stats)       do { } while (0)
# define __update_stats_wait_end(rq, p, stats)         do { } while (0)
# define __update_stats_enqueue_sleeper(rq, p, stats)  do { } while (0)
# define check_schedstat_required()                    do { } while (0)

#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_FAIR_GROUP_SCHED
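/*
 * When a sched_entity is not a task (group scheduling), its statistics
 * live right behind it in this wrapper, so __schedstats_from_se() can
 * recover them with container_of().
 */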
struct sched_entity_stats {
	struct sched_entity     se;
	struct sched_statistics stats;
} __no_randomize_layout;
#endif

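/* Return the sched_statistics backing @se, for both tasks and group entities. */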
static inline struct sched_statistics *
__schedstats_from_se(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	if (!entity_is_task(se))
		return &container_of(se, struct sched_entity_stats, se)->stats;
#endif
	return &task_of(se)->stats;
}

#ifdef CONFIG_PSI
void psi_task_change(struct task_struct *task, int clear, int set);
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
		     bool sleep);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
#else
static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
				       struct task_struct *prev) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and migrations, where a task
 * and its runnable state are being moved between CPUs and runqueues.
 *
 * A notable case is a task whose dequeue is delayed. PSI considers
 * such a task to be sleeping, but because it is still on the runqueue
 * it can go through migration requeues. In this case, its *sleeping*
 * states need to be transferred.
 */
static inline void psi_enqueue(struct task_struct *p, int flags)
{
	int clear = 0, set = 0;

	if (static_branch_likely(&psi_disabled))
		return;

	/* Same runqueue, nothing changed for psi */
	if (flags & ENQUEUE_RESTORE)
		return;

	if (p->se.sched_delayed) {
		/* CPU migration of "sleeping" task */
		SCHED_WARN_ON(!(flags & ENQUEUE_MIGRATED));
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->in_iowait)
			set |= TSK_IOWAIT;
	} else if (flags & ENQUEUE_MIGRATED) {
		/* CPU migration of runnable task */
		set = TSK_RUNNING;
		if (p->in_memstall)
			set |= TSK_MEMSTALL | TSK_MEMSTALL_RUNNING;
	} else {
		/* Wakeup of new or sleeping task */
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		set = TSK_RUNNING;
		if (p->in_memstall)
			set |= TSK_MEMSTALL_RUNNING;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, int flags)
{
	if (static_branch_likely(&psi_disabled))
		return;

	/* Same runqueue, nothing changed for psi */
	if (flags & DEQUEUE_SAVE)
		return;

	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 */
	if (flags & DEQUEUE_SLEEP)
		return;

	/*
	 * When migrating a task to another CPU, clear all psi
	 * state. The enqueue callback above will work it out.
	 */
	psi_task_change(p, p->psi_flags, 0);
}

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->psi_flags)) {
		struct rq_flags rf;
		struct rq *rq;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, p->psi_flags, 0);
		__task_rq_unlock(rq, &rf);
	}
}

static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}

#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, int flags) {}
static inline void psi_dequeue(struct task_struct *p, int flags) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
				       struct task_struct *prev) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs: the delta taken on each CPU cancels out the skew.
 */
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = 0;

	if (!t->sched_info.last_queued)
		return;

	delta = rq_clock(rq) - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeue(rq, delta);
}

/*
 * Called when a task finally hits the CPU.  We can now calculate how
 * long it was waiting to run.  We also note when it started running so
 * that we can keep stats on how long its time slice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now, delta = 0;

	if (!t->sched_info.last_queued)
		return;

	now = rq_clock(rq);
	delta = now - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeue() will clear that stamp when appropriate.
 */
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
	if (!t->sched_info.last_queued)
		t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the currently running task,
 * typically involuntarily due to expiring its time slice (this may
 * also be called when switching to the idle task).  Now we can
 * calculate how long it ran.  Also, if the process is still in the
 * TASK_RUNNING state, call sched_info_enqueue() to mark that it has
 * now started waiting on the runqueue again.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (task_is_running(t))
		sched_info_enqueue(rq, t);
}

/*
 * Called when tasks are switched, typically involuntarily due to the
 * previous task expiring its time slice.  (This may also be called when
 * switching to or from the idle task.)  We are only called when
 * prev != next.
 */
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_enqueue(rq, t)	do { } while (0)
# define sched_info_dequeue(rq, t)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */

#endif /* _KERNEL_STATS_H */