xref: /linux/kernel/sched/stats.h (revision 83bce9c2baa51e439480a713119a73d3c8b61083)

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
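
/*
 * Note: the per-rq counters updated above (rq_sched_info.run_delay,
 * rq_sched_info.pcount and rq_cpu_time) are the CPU-wide aggregates;
 * they are what is reported in the per-CPU lines of /proc/schedstat.
 */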
#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
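
/*
 * Illustrative usage (a sketch, not part of this header): callers wrap
 * accesses to schedstat-only fields in these macros so the update is
 * guarded by the sched_schedstats static branch, e.g.
 *
 *	schedstat_inc(rq->ttwu_count);
 *	schedstat_set(se->statistics.wait_start, rq_clock(rq));
 *
 * The field names above are only examples of how the wrappers are used
 * elsewhere in the scheduler.
 */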

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled()		0
#define schedstat_inc(var)		do { } while (0)
#define schedstat_add(var, amt)		do { } while (0)
#define schedstat_set(var, val)		do { } while (0)
#define schedstat_val(var)		0
#define schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU.  We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; the delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}
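
/*
 * A brief sketch of the skew argument above: suppose one CPU's rq clock
 * runs S nanoseconds ahead of another's.  A task queued and dequeued on
 * that CPU accumulates
 *
 *	delta = (t_dequeue + S) - (t_queue + S) = t_dequeue - t_queue
 *
 * because last_queued and 'now' are both read from the same rq's clock,
 * so the per-CPU offset S cancels out of the accumulated run_delay.
 */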

/*
 * Called when a task finally hits the CPU.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}
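
/*
 * The per-task sched_info fields updated here (run_delay, pcount,
 * last_arrival) back the per-task delay accounting; run_delay and pcount
 * are, for example, what /proc/<pid>/schedstat reports for a task.
 */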

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}
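
/*
 * Taken together, these hooks track a task's wait/run cycle:
 * enqueue_task() -> sched_info_queued() stamps last_queued; when the task
 * is switched in, sched_info_arrive() converts that stamp into run_delay
 * and records last_arrival; sched_info_depart() (below) accounts the run
 * time when it is switched out; and dequeue_task() -> sched_info_dequeued()
 * accounts the wait time of a task that is dequeued before it ever ran.
 */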

/*
 * Called when a process ceases being the active-running process
 * involuntarily, typically because it has expired its time slice (this may
 * also be called when switching to the idle task).  Now we can calculate
 * how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily, typically because they have
 * expired their time slice.  (This may also be called when switching to or
 * from the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
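
/*
 * sched_info_switch() is the hook used by the context-switch path
 * (prepare_task_switch() in kernel/sched/core.c), which only runs when
 * prev != next, matching the comment above __sched_info_switch().
 */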
#else /* !CONFIG_SCHED_INFO */
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHED_INFO */