#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)       do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)  do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)        do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)       do { } while (0)
# define schedstat_add(rq, field, amt)  do { } while (0)
# define schedstat_set(var, val)        do { } while (0)
#endif

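/*
 * Illustrative note (not part of the original file): callers elsewhere in
 * kernel/sched/ are expected to touch schedstat fields only through the
 * macros above, so call sites need no #ifdef of their own.  A sketch,
 * assuming the yld_count counter that struct rq carries under
 * CONFIG_SCHEDSTATS:
 *
 *      schedstat_inc(rq, yld_count);
 *
 * With CONFIG_SCHEDSTATS disabled the macro expands to an empty statement,
 * so the field reference (and any side effect in the arguments) disappears.
 * The per-runqueue counters updated here are exported via /proc/schedstat.
 */
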
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu.  We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus; the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
        unsigned long long now = rq_clock(task_rq(t)), delta = 0;

        if (unlikely(sched_info_on()))
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(task_rq(t), delta);
}

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
        unsigned long long now = rq_clock(task_rq(t)), delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(task_rq(t));
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
        unsigned long long delta = rq_clock(task_rq(t)) -
                                        t->sched_info.last_arrival;

        rq_sched_info_depart(task_rq(t), delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(t);
}

/*
 * Called when tasks are switched, typically involuntarily because their
 * time slice expired.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
        struct rq *rq = task_rq(prev);

        /*
         * prev now departs the cpu.  It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(prev);

        if (next != rq->idle)
                sched_info_arrive(next);
}

static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(prev, next);
}
#else
#define sched_info_queued(t)                    do { } while (0)
#define sched_info_reset_dequeued(t)            do { } while (0)
#define sched_info_dequeued(t)                  do { } while (0)
#define sched_info_switch(t, next)              do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

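/*
 * Illustrative call graph (not part of the original file), a sketch of how
 * the scheduler core is expected to drive the hooks above so that run_delay
 * accumulates time spent runnable but not running:
 *
 *      enqueue_task()         -> sched_info_queued(p)      stamp last_queued
 *      dequeue_task()         -> sched_info_dequeued(p)    fold wait into run_delay
 *      prepare_task_switch()  -> sched_info_switch(prev, next)
 *                                    -> sched_info_depart(prev)
 *                                    -> sched_info_arrive(next)
 *
 * The per-task totals show up in /proc/<pid>/schedstat and feed the
 * CONFIG_TASK_DELAY_ACCT delay-accounting code in kernel/delayacct.c.
 */
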
/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:        Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer->running)
                return false;

        /*
         * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
         * in __exit_signal(), we won't account to the signal struct further
         * cputime consumed by that task, even though the task can still be
         * ticking after __exit_signal().
         *
         * In order to keep a consistent behaviour between thread group cputime
         * and thread group cputimer accounting, let's also ignore the cputime
         * elapsing after __exit_signal() in any running thread group timer.
         *
         * This makes sure that POSIX CPU clocks and timers are synchronized, so
         * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
         * clock delta is behind the expiring timer value.
         */
        if (unlikely(!tsk->sighand))
                return false;

        return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @cputime:    Time value by which to increment the utime field of the
 *              thread_group_cputime structure.
 *
 * If thread group time is being maintained, update the utime field of the
 * group-wide cputimer.
 */
static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        raw_spin_lock(&cputimer->lock);
        cputimer->cputime.utime += cputime;
        raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @cputime:    Time value by which to increment the stime field of the
 *              thread_group_cputime structure.
 *
 * If thread group time is being maintained, update the stime field of the
 * group-wide cputimer.
 */
static inline void account_group_system_time(struct task_struct *tsk,
                                             cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        raw_spin_lock(&cputimer->lock);
        cputimer->cputime.stime += cputime;
        raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @ns:         Time value by which to increment the sum_exec_runtime field
 *              of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, update the sum_exec_runtime
 * field of the group-wide cputimer.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        raw_spin_lock(&cputimer->lock);
        cputimer->cputime.sum_exec_runtime += ns;
        raw_spin_unlock(&cputimer->lock);
}

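/*
 * Illustrative note (not part of the original file): these helpers are meant
 * to be driven from the regular per-task accounting paths, e.g.
 * account_user_time()/account_system_time() in kernel/sched/cputime.c at the
 * timer tick, and from update_curr() when runtime is charged to a task:
 *
 *      account_group_exec_runtime(curtask, delta_exec);
 *
 * Keeping cputimer->cputime current this way is what lets the POSIX CPU timer
 * code check group-wide timer expiry without summing over every thread in the
 * group on each tick.
 */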