/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}
#define schedstat_enabled()             static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)            do { var++; } while (0)
#define schedstat_inc(var)              do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)       do { var += (amt); } while (0)
#define schedstat_add(var, amt)         do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)       do { var = (val); } while (0)
#define schedstat_set(var, val)         do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)              (var)
#define schedstat_val_or_zero(var)      ((schedstat_enabled()) ? (var) : 0)

#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define schedstat_enabled()            0
# define __schedstat_inc(var)           do { } while (0)
# define schedstat_inc(var)             do { } while (0)
# define __schedstat_add(var, amt)      do { } while (0)
# define schedstat_add(var, amt)        do { } while (0)
# define __schedstat_set(var, val)      do { } while (0)
# define schedstat_set(var, val)        do { } while (0)
# define schedstat_val(var)             0
# define schedstat_val_or_zero(var)     0
#endif /* CONFIG_SCHEDSTATS */
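/*
 * Usage sketch (illustrative only, not part of this header): callers wrap
 * counter updates in the schedstat macros so they compile down to nothing
 * unless schedstats is enabled at runtime. The __schedstat_*() variants
 * skip the static-branch test and are intended for paths that have already
 * checked schedstat_enabled() once. The struct and function below are
 * hypothetical:
 *
 *      struct foo_stats { unsigned long wait_count; u64 wait_sum; };
 *
 *      static void foo_account_wait(struct foo_stats *st, u64 delta)
 *      {
 *              if (!schedstat_enabled())
 *                      return;
 *              __schedstat_inc(st->wait_count);
 *              __schedstat_add(st->wait_sum, delta);
 *      }
 */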
#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
        int clear = 0, set = TSK_RUNNING;

        if (static_branch_likely(&psi_disabled))
                return;

        if (!wakeup || p->sched_psi_wake_requeue) {
                if (p->in_memstall)
                        set |= TSK_MEMSTALL;
                if (p->sched_psi_wake_requeue)
                        p->sched_psi_wake_requeue = 0;
        } else {
                if (p->in_iowait)
                        clear |= TSK_IOWAIT;
        }

        psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
        int clear = TSK_RUNNING;

        if (static_branch_likely(&psi_disabled))
                return;

        /*
         * A voluntary sleep is a dequeue followed by a task switch. To
         * avoid walking all ancestors twice, psi_task_switch() handles
         * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
         * Do nothing here.
         */
        if (sleep)
                return;

        if (p->in_memstall)
                clear |= TSK_MEMSTALL;

        psi_task_change(p, clear, 0);
}

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
        if (static_branch_likely(&psi_disabled))
                return;
        /*
         * Is the task being migrated during a wakeup? Make sure to
         * deregister its sleep-persistent psi states from the old
         * queue, and let psi_enqueue() know it has to requeue.
         */
        if (unlikely(p->in_iowait || p->in_memstall)) {
                struct rq_flags rf;
                struct rq *rq;
                int clear = 0;

                if (p->in_iowait)
                        clear |= TSK_IOWAIT;
                if (p->in_memstall)
                        clear |= TSK_MEMSTALL;

                rq = __task_rq_lock(p, &rf);
                psi_task_change(p, clear, 0);
                p->sched_psi_wake_requeue = 1;
                __task_rq_unlock(rq, &rf);
        }
}

static inline void psi_sched_switch(struct task_struct *prev,
                                    struct task_struct *next,
                                    bool sleep)
{
        if (static_branch_likely(&psi_disabled))
                return;

        psi_task_switch(prev, next, sleep);
}

#else /* !CONFIG_PSI: */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
                                    struct task_struct *next,
                                    bool sleep) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs: the delta taken on each CPU cancels out the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (sched_info_on()) {
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        }
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
        if (sched_info_on()) {
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(rq);
        }
}
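/*
 * Reader's sketch (illustrative, not defined in this header): the per-task
 * fields maintained above are exported by fs/proc as /proc/<pid>/schedstat,
 * three values: se.sum_exec_runtime and sched_info.run_delay in
 * nanoseconds, followed by sched_info.pcount. A userspace monitor could,
 * for example, sample its own run delay like this:
 *
 *      unsigned long long cputime, run_delay, pcount;
 *      FILE *f = fopen("/proc/self/schedstat", "r");
 *
 *      if (f && fscanf(f, "%llu %llu %llu",
 *                      &cputime, &run_delay, &pcount) == 3)
 *              printf("ran %llu ns, waited %llu ns over %llu slices\n",
 *                     cputime, run_delay, pcount);
 */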
/*
 * Called when a process ceases being the active-running process
 * involuntarily due, typically, to expiring its time slice (this may
 * also be called when switching to the idle task). Now we can calculate
 * how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
        /*
         * prev now departs the CPU. It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
        if (sched_info_on())
                __sched_info_switch(rq, prev, next);
}

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_queued(rq, t)       do { } while (0)
# define sched_info_reset_dequeued(t)   do { } while (0)
# define sched_info_dequeued(rq, t)     do { } while (0)
# define sched_info_depart(rq, t)       do { } while (0)
# define sched_info_arrive(rq, next)    do { } while (0)
# define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHED_INFO */
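/*
 * Worked example (illustrative numbers): a task is woken at t=100ns
 * (sched_info_queued() stamps last_queued = 100), is picked to run at
 * t=130 (sched_info_arrive(): run_delay += 30, last_arrival = 130,
 * pcount++), and is preempted at t=180 while still TASK_RUNNING
 * (sched_info_depart(): rq_cpu_time += 50, and sched_info_queued()
 * re-stamps last_queued = 180 so the next wait starts accruing at once).
 */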