#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

# define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
# define schedstat_inc(rq, field)	do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
# define schedstat_add(rq, field, amt)	do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
# define schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
# define schedstat_val(rq, field)	((schedstat_enabled()) ? (rq)->field : 0)

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_enabled()		0
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
# define schedstat_val(rq, field)	0
#endif /* CONFIG_SCHEDSTATS */
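
/*
 * Illustrative sketch of how the schedstat helpers above might be used by a
 * caller that already holds the runqueue lock. This is not part of the
 * original header; "yld_count", "wait_start" and "exec_clock" are only
 * examples of schedstat-guarded fields:
 *
 *	schedstat_inc(rq, yld_count);
 *	schedstat_set(se->statistics.wait_start, rq_clock(rq));
 *	schedstat_add(cfs_rq, exec_clock, delta_exec);
 *
 * With CONFIG_SCHEDSTATS disabled these compile away entirely; with it
 * enabled, the static_branch_unlikely() check keeps the fast paths cheap
 * unless schedstats are switched on at runtime.
 */
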
#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu; we call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus. The delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
#else
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHED_INFO */
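
/*
 * Illustrative sketch of the expected call sites for the hooks above. This
 * is not part of this header; the actual callers live in the core scheduler
 * (e.g. kernel/sched/core.c) and may differ in detail:
 *
 *	enqueue_task():		sched_info_queued(rq, p);    stamps last_queued
 *	dequeue_task():		sched_info_dequeued(rq, p);  accumulates run_delay
 *	context switch path:	sched_info_switch(rq, prev, next);
 *
 * Together these yield, per task, how long it waited on a runqueue
 * (run_delay), how many times it ran (pcount), and when it last got a cpu
 * (last_arrival).
 */
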
/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running))
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we no longer account further cputime consumed by
	 * that task to the signal struct, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any thread group timer running.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
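
/*
 * Illustrative sketch of how the account_group_*() helpers are expected to
 * be driven. This is not part of this header; the real callers are the
 * tick-time accounting paths (e.g. kernel/sched/cputime.c and the per-class
 * update_curr() routines) and may differ in detail:
 *
 *	account_user_time(p, cputime):		account_group_user_time(p, cputime);
 *	account_system_time(p, cputime):	account_group_system_time(p, cputime);
 *	update_curr():				account_group_exec_runtime(curr, delta_exec);
 *
 * Each helper bails out via cputimer_running() unless a thread-group POSIX
 * CPU timer is armed, so the atomic64 additions are only paid when needed.
 */
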