#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; the delta taken on each CPU cancels out the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), but it also only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}
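/*
 * Worked example of the wait-time accounting above (the clock values are
 * made up for illustration):
 *
 *	t=100 (CPU0 clock): enqueue_task()  -> sched_info_queued():   last_queued = 100
 *	t=130 (CPU0 clock): dequeue_task()  -> sched_info_dequeued(): delta = 130 - 100 = 30
 *	                                       run_delay += 30, last_queued cleared
 *	t=500 (CPU1 clock): enqueue_task()  -> sched_info_queued():   last_queued = 500
 *	t=620 (CPU1 clock): switched in     -> sched_info_arrive():   delta = 620 - 500 = 120
 *	                                       run_delay += 120, pcount++, last_arrival = 620
 *
 * run_delay grows by 30 + 120 = 150 even though CPU1's rq->clock is far
 * ahead of CPU0's: each delta is taken against the clock of the runqueue
 * the task was queued on at the time, so the skew cancels out.
 */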
/*
 * Called when a process involuntarily ceases being the currently running
 * process, typically because its time slice expired (this may also be
 * called when switching to the idle task).  Now we can calculate how long
 * we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily, typically because their
 * time slice expired.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
#else /* !CONFIG_SCHED_INFO */
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHED_INFO */
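/*
 * Rough sketch of how the sched_info hooks above are meant to be driven by
 * the core scheduler.  The function names below are hypothetical stand-ins
 * used only to make the intended call order explicit; the real call sites
 * live in the enqueue/dequeue and context-switch paths of core.c:
 *
 *	static void my_enqueue_task(struct rq *rq, struct task_struct *p)
 *	{
 *		sched_info_queued(rq, p);	// stamp last_queued, once
 *		// ... class enqueue ...
 *	}
 *
 *	static void my_dequeue_task(struct rq *rq, struct task_struct *p)
 *	{
 *		sched_info_dequeued(rq, p);	// fold waited time into run_delay
 *		// ... class dequeue ...
 *	}
 *
 *	// at context-switch time, with prev != next:
 *	sched_info_switch(rq, prev, next);	// depart prev, arrive next
 */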
/*
 * The following are functions that support scheduler-internal time
 * accounting.  These functions are generally called at the timer tick.
 * None of this depends on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Check if cputimer isn't running.  This is accessed without locking. */
	if (!READ_ONCE(cputimer->running))
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account to the signal struct further
	 * cputime consumed by that task, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any running thread group timer.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized,
	 * so that a POSIX CPU timer won't expire while the corresponding
	 * POSIX CPU clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
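/*
 * Illustrative (hypothetical) caller of the account_group_*() helpers
 * above: the per-tick cputime accounting path feeds user time, system time
 * and scheduler runtime into the shared thread-group counters roughly as
 * follows.  my_account_tick() is a made-up name used only for illustration,
 * not a real kernel function:
 *
 *	static void my_account_tick(struct task_struct *p, cputime_t delta,
 *				    unsigned long long ns, int user)
 *	{
 *		if (user)
 *			account_group_user_time(p, delta);
 *		else
 *			account_group_system_time(p, delta);
 *		account_group_exec_runtime(p, ns);
 *	}
 *
 * When no thread-group cputimer is armed, each helper bails out as soon as
 * cputimer_running() sees READ_ONCE(cputimer->running) return false, so the
 * common case stays cheap and lock-free.
 */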