/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H

#include <linux/sched/signal.h>

/*
 * cputime accounting APIs:
 */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>

#ifndef cputime_to_nsecs
# define cputime_to_nsecs(__ct) \
        (cputime_to_usecs(__ct) * NSEC_PER_USEC)
#endif
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
                         u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
                                u64 *utime, u64 *stime)
{
        *utime = t->utime;
        *stime = t->stime;
}

static inline u64 task_gtime(struct task_struct *t)
{
        return t->gtime;
}
#endif

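/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * helper showing the task_cputime() calling convention. The helper name
 * is an assumption made purely for illustration.
 */
static inline u64 example_task_cputime_total(struct task_struct *t)
{
        u64 utime, stime;

        /* Resolves to the accessor matching CONFIG_VIRT_CPU_ACCOUNTING_GEN. */
        task_cputime(t, &utime, &stime);

        /* Total accounted user + system time, in nanoseconds. */
        return utime + stime;
}
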
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
                                       u64 *utimescaled,
                                       u64 *stimescaled)
{
        *utimescaled = t->utimescaled;
        *stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
                                       u64 *utimescaled,
                                       u64 *stimescaled)
{
        task_cputime(t, utimescaled, stimescaled);
}
#endif

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
                           u64 *ut, u64 *st);

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running
 *
 * @tsk:        Pointer to target task.
 */
#ifdef CONFIG_POSIX_TIMERS
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        /* Check whether the cputimer is running. Accessed without locking. */
        if (!READ_ONCE(cputimer->running))
                return NULL;

        /*
         * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
         * in __exit_signal(), we won't account any further cputime consumed by
         * that task to the signal struct, even though the task can still be
         * ticking after __exit_signal().
         *
         * To keep thread group cputime and thread group cputimer accounting
         * consistent, let's also ignore the cputime elapsing after
         * __exit_signal() in any running thread group timer.
         *
         * This makes sure that POSIX CPU clocks and timers are synchronized, so
         * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
         * clock delta is behind the expiring timer value.
         */
        if (unlikely(!tsk->sighand))
                return NULL;

        return cputimer;
}
#else
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
        return NULL;
}
#endif

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @cputime:    Time value by which to increment the utime field of the
 *              thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
                                           u64 cputime)
{
        struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

        if (!cputimer)
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @cputime:    Time value by which to increment the stime field of the
 *              thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
                                             u64 cputime)
{
        struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

        if (!cputimer)
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @ns:         Time value by which to increment the sum_exec_runtime field
 *              of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
{
        struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

        if (!cputimer)
                return;

        atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        prev->utime = prev->stime = 0;
        raw_spin_lock_init(&prev->lock);
#endif
}

extern unsigned long long
task_sched_runtime(struct task_struct *task);

#endif /* _LINUX_SCHED_CPUTIME_H */
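
/*
 * Illustrative sketch (not part of the original header, kept compiled out):
 * roughly how a scheduler update path is expected to feed the
 * account_group_*() helpers at the timer tick. The function name and the
 * delta_exec parameter are hypothetical, shown only for the calling
 * convention.
 */
#if 0
static void example_charge_runtime(struct task_struct *curr, u64 delta_exec)
{
        /* Charge the elapsed runtime to any running group cputimer. */
        account_group_exec_runtime(curr, delta_exec);
}
#endif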