/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H

#include <linux/sched/signal.h>

/*
 * cputime accounting APIs:
 */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>

#ifndef cputime_to_nsecs
# define cputime_to_nsecs(__ct)	\
	(cputime_to_usecs(__ct) * NSEC_PER_USEC)
#endif
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				u64 *utime, u64 *stime)
{
	*utime = t->utime;
	*stime = t->stime;
}

static inline u64 task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	*utimescaled = t->utimescaled;
	*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	task_cputime(t, utimescaled, stimescaled);
}
#endif

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

/*
 * The following functions support scheduler-internal time accounting.
 * They are generally called at the timer tick. None of this depends on
 * CONFIG_SCHEDSTATS.
 */

/**
 * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
#ifdef CONFIG_POSIX_TIMERS
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Bail out if the cputimer isn't running; the flag is read locklessly. */
	if (!READ_ONCE(cputimer->running))
		return NULL;

	/*
	 * After the task's sum_exec_runtime is flushed to
	 * sig->sum_sched_runtime in __exit_signal(), further cputime consumed
	 * by that task is no longer accounted to the signal struct, even
	 * though the task can still be ticking after __exit_signal().
	 *
	 * To keep thread group cputime and thread group cputimer accounting
	 * consistent, let's likewise ignore the cputime elapsing after
	 * __exit_signal() for any running thread group timer.
	 *
	 * This makes sure that POSIX CPU clocks and timers stay synchronized,
	 * so that a POSIX CPU timer won't expire while the corresponding POSIX
	 * CPU clock still reads a value behind the timer's expiry.
	 */
	if (unlikely(!tsk->sighand))
		return NULL;

	return cputimer;
}
#else
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
	return NULL;
}
#endif
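/*
 * Illustrative sketch (not part of this header): the account_group_*()
 * helpers below are meant to be called alongside the matching per-task
 * accounting on the tick path. The helper name example_account_utime()
 * and its delta argument are hypothetical, shown only to make the calling
 * convention concrete:
 *
 *	static void example_account_utime(struct task_struct *tsk, u64 delta)
 *	{
 *		tsk->utime += delta;			// per-task time
 *		account_group_user_time(tsk, delta);	// thread-group time
 *	}
 *
 * Because get_running_cputimer() bails out locklessly when no POSIX CPU
 * timer is armed, callers don't need their own "is a timer running" check.
 */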
/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   u64 cputime)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     u64 cputime)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}

extern unsigned long long
task_sched_runtime(struct task_struct *task);

#endif /* _LINUX_SCHED_CPUTIME_H */
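/*
 * Usage sketch (illustrative only; the variable names are hypothetical):
 * reading a task's CPU time through this API. task_cputime() returns the
 * raw accounted values, while task_cputime_adjusted() additionally smooths
 * the tick-sampled utime/stime against the precise sum_exec_runtime;
 * broadly, the adjusted values are what interfaces such as /proc report:
 *
 *	u64 ut, st;
 *
 *	task_cputime(tsk, &ut, &st);		// raw utime/stime
 *	task_cputime_adjusted(tsk, &ut, &st);	// runtime-scaled values
 */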