#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H

#include <linux/sched/signal.h>

/*
 * cputime accounting APIs:
 */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>

#ifndef cputime_to_nsecs
# define cputime_to_nsecs(__ct)	\
	(cputime_to_usecs(__ct) * NSEC_PER_USEC)
#endif
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
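
/*
 * Illustrative sketch (not part of this header's API): the fallback
 * above converts an architecture's raw cputime_t to nanoseconds by
 * way of microseconds, so sub-microsecond resolution is dropped:
 *
 *	u64 ns = cputime_to_nsecs(ct);
 */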

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				u64 *utime, u64 *stime)
{
	*utime = t->utime;
	*stime = t->stime;
}

static inline u64 task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif
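
/*
 * Usage sketch (illustrative): snapshot a task's user, system and
 * guest time in nanoseconds. With CONFIG_VIRT_CPU_ACCOUNTING_GEN the
 * values come from the vtime machinery; otherwise they are read
 * straight out of task_struct:
 *
 *	u64 utime, stime, gtime;
 *
 *	task_cputime(current, &utime, &stime);
 *	gtime = task_gtime(current);
 */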

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	*utimescaled = t->utimescaled;
	*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	task_cputime(t, utimescaled, stimescaled);
}
#endif
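
/*
 * Illustrative note: without CONFIG_ARCH_HAS_SCALED_CPUTIME the
 * wrapper above falls back to the unscaled values, so a caller such
 * as
 *
 *	task_cputime_scaled(t, &utimescaled, &stimescaled);
 *
 * gets meaningful values under either configuration.
 */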

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);

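/*
 * Usage sketch (illustrative): the *_adjusted() variants scale the
 * raw utime/stime samples against sum_exec_runtime so the values
 * reported to userspace stay monotonic across reads:
 *
 *	u64 ut, st;
 *
 *	task_cputime_adjusted(p, &ut, &st);
 */
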
/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

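/*
 * Usage sketch (illustrative): aggregate the CPU time of every thread
 * in tsk's group into a struct task_cputime (utime, stime and
 * sum_exec_runtime):
 *
 *	struct task_cputime times;
 *
 *	thread_group_cputime(tsk, &times);
 */
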
/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
#ifdef CONFIG_POSIX_TIMERS
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Bail out if the cputimer isn't running; ->running is read without locking. */
	if (!READ_ONCE(cputimer->running))
		return NULL;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we no longer account further cputime consumed by
	 * that task to the signal struct, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * To keep behaviour consistent between thread group cputime and thread
	 * group cputimer accounting, let's also ignore the cputime elapsing
	 * after __exit_signal() in any running thread group timer.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return NULL;

	return cputimer;
}
#else
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
	return NULL;
}
#endif

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the thread group's
 * shared cputime structure and update its utime field.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   u64 cputime)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}
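
/*
 * Usage sketch (illustrative; the surrounding caller is hypothetical):
 * tick-time accounting pairs the per-task update with the group one:
 *
 *	p->utime += cputime;
 *	account_group_user_time(p, cputime);
 */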

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the thread group's
 * shared cputime structure and update its stime field.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     u64 cputime)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the thread group's
 * shared cputime structure and update its sum_exec_runtime field.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
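
/*
 * Usage sketch (illustrative): the scheduler's runtime update path
 * feeds the delta it just added to a task's sum_exec_runtime into the
 * group total as well:
 *
 *	account_group_exec_runtime(curr, delta_exec);
 */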

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}
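
/*
 * Illustrative note: prev_cputime holds the previous utime/stime
 * snapshot that task_cputime_adjusted() uses to keep reported values
 * monotonic; a freshly allocated instance must be seeded first:
 *
 *	struct prev_cputime prev;
 *
 *	prev_cputime_init(&prev);
 */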

extern unsigned long long
task_sched_runtime(struct task_struct *task);
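
/*
 * Usage sketch (illustrative): task_sched_runtime() returns the
 * task's accumulated scheduled runtime in nanoseconds, e.g. for a
 * per-thread CPU-clock read:
 *
 *	u64 ns = task_sched_runtime(p);
 */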

#endif /* _LINUX_SCHED_CPUTIME_H */