// SPDX-License-Identifier: GPL-2.0
/*
 *  hrtimers - High-resolution kernel timers
 *
 *   Copyright(C) 2005, Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
 *   Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 *  data type definitions, declarations, prototypes
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 */
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H

#include <linux/hrtimer_defs.h>
#include <linux/hrtimer_types.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu-defs.h>
#include <linux/rbtree.h>
#include <linux/timer.h>

/*
 * Mode arguments of the hrtimer_*() functions:
 *
 * HRTIMER_MODE_ABS		- Time value is absolute
 * HRTIMER_MODE_REL		- Time value is relative to now
 * HRTIMER_MODE_PINNED		- Timer is bound to the CPU on which it is
 *				  started (only considered when starting
 *				  the timer)
 * HRTIMER_MODE_SOFT		- Timer callback function will be executed in
 *				  soft irq context
 * HRTIMER_MODE_HARD		- Timer callback function will be executed in
 *				  hard irq context even on PREEMPT_RT.
 */
enum hrtimer_mode {
	HRTIMER_MODE_ABS	= 0x00,
	HRTIMER_MODE_REL	= 0x01,
	HRTIMER_MODE_PINNED	= 0x02,
	HRTIMER_MODE_SOFT	= 0x04,
	HRTIMER_MODE_HARD	= 0x08,

	HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
	HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,

	HRTIMER_MODE_ABS_SOFT	= HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
	HRTIMER_MODE_REL_SOFT	= HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,

	HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
	HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,

	HRTIMER_MODE_ABS_HARD	= HRTIMER_MODE_ABS | HRTIMER_MODE_HARD,
	HRTIMER_MODE_REL_HARD	= HRTIMER_MODE_REL | HRTIMER_MODE_HARD,

	HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD,
	HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
};
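
/*
 * The composite entries above are plain bitwise ORs of the basic flags, so a
 * caller picks a single enumerator instead of combining flags by hand and
 * passes it to the setup and start functions declared further down in this
 * file.  A minimal sketch (timer and callback are placeholders, not part of
 * this header):
 *
 *	hrtimer_setup(&timer, callback, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
 *	hrtimer_start(&timer, ms_to_ktime(10), HRTIMER_MODE_REL_HARD);
 */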

/*
 * Values to track the state of the timer
 *
 * Possible states:
 *
 * 0x00		inactive
 * 0x01		enqueued into rbtree
 *
 * The callback state is not part of the timer->state because clearing it would
 * mean touching the timer after the callback returned, which would make it
 * impossible to free the timer from the callback function.
 *
 * Therefore we track the callback state in:
 *
 *	timer->base->cpu_base->running == timer
 *
 * On SMP it is possible to have a "callback function running and enqueued"
 * status. It happens for example when a posix timer expired and the callback
 * queued a signal. Between dropping the lock which protects the posix timer
 * and reacquiring the base lock of the hrtimer, another CPU can deliver the
 * signal and rearm the timer.
 *
 * All state transitions are protected by cpu_base->lock.
 */
#define HRTIMER_STATE_INACTIVE	0x00
#define HRTIMER_STATE_ENQUEUED	0x01
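
/*
 * As a consequence, a timer counts as "active" when it is either enqueued or
 * its callback is running.  Ignoring the lockless retry logic that the real
 * hrtimer_active() needs, this is conceptually (both helpers are defined
 * further down in this header):
 *
 *	bool roughly_active(struct hrtimer *timer)
 *	{
 *		return hrtimer_is_queued(timer) ||
 *		       hrtimer_callback_running(timer);
 *	}
 */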

/**
 * struct hrtimer_sleeper - simple sleeper structure
 * @timer:	embedded timer structure
 * @task:	task to wake up
 *
 * @task is set to NULL when the timer expires.
 */
struct hrtimer_sleeper {
	struct hrtimer timer;
	struct task_struct *task;
};
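
/*
 * A typical on-stack usage sketch, with error and signal handling omitted
 * (timeout is a caller-supplied ktime_t; most users go through
 * schedule_hrtimeout() and friends instead):
 *
 *	struct hrtimer_sleeper t;
 *
 *	hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_set_expires(&t.timer, timeout);
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
 *	if (t.task)
 *		schedule();
 *	hrtimer_cancel(&t.timer);
 *	__set_current_state(TASK_RUNNING);
 *	destroy_hrtimer_on_stack(&t.timer);
 */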

static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
	timer->node.expires = time;
	timer->_softexpires = time;
}

static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
	timer->_softexpires = time;
	timer->node.expires = ktime_add_safe(time, delta);
}

static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
{
	timer->_softexpires = time;
	timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}
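
/*
 * The _range variants keep the requested (soft) expiry and the latest
 * acceptable (hard) expiry apart, which gives the core a window in which it
 * may batch expirations.  For example (abs_time is a caller-supplied
 * absolute ktime_t):
 *
 *	hrtimer_set_expires_range_ns(timer, abs_time, 100 * NSEC_PER_USEC);
 *
 * records abs_time as the soft expiry and abs_time + 100us as the hard
 * expiry; hrtimer_start_expires() further down arms the timer with exactly
 * that window.
 */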

static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
	timer->node.expires = ktime_add_safe(timer->node.expires, time);
	timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}

static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
{
	timer->node.expires = ktime_add_ns(timer->node.expires, ns);
	timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
}

static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
	return timer->node.expires;
}

static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
	return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
	return ktime_to_ns(timer->node.expires);
}

ktime_t hrtimer_cb_get_time(const struct hrtimer *timer);

static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
	return ktime_sub(timer->node.expires, hrtimer_cb_get_time(timer));
}

static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
		timer->base->cpu_base->hres_active : 0;
}

#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;

extern void hrtimer_interrupt(struct clock_event_device *dev);

extern unsigned int hrtimer_resolution;

#else

#define hrtimer_resolution	(unsigned int)LOW_RES_NSEC

#endif

static inline ktime_t
__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
{
	ktime_t rem = ktime_sub(timer->node.expires, now);

	/*
	 * Adjust relative timers for the extra we added in
	 * hrtimer_start_range_ns() to prevent short timeouts.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
		rem -= hrtimer_resolution;
	return rem;
}

static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
	return __hrtimer_expires_remaining_adjusted(timer, hrtimer_cb_get_time(timer));
}

#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
extern void timerfd_resume(void);
#else
static inline void timerfd_clock_was_set(void) { }
static inline void timerfd_resume(void) { }
#endif

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);

#ifdef CONFIG_PREEMPT_RT
void hrtimer_cancel_wait_running(const struct hrtimer *timer);
#else
static inline void hrtimer_cancel_wait_running(struct hrtimer *timer)
{
	cpu_relax();
}
#endif

static inline enum hrtimer_restart hrtimer_dummy_timeout(struct hrtimer *unused)
{
	return HRTIMER_NORESTART;
}

/* Exported timer functions: */

/* Initialize timers: */
extern void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t clock_id, enum hrtimer_mode mode);
extern void hrtimer_setup_on_stack(struct hrtimer *timer,
				   enum hrtimer_restart (*function)(struct hrtimer *),
				   clockid_t clock_id, enum hrtimer_mode mode);
extern void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl, clockid_t clock_id,
					   enum hrtimer_mode mode);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif

/* Basic timer operations: */
extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
				   u64 range_ns, const enum hrtimer_mode mode);

/**
 * hrtimer_start - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL), optionally combined with
 *		pinned (HRTIMER_MODE_PINNED); the softirq based modes are
 *		considered for debugging purposes only!
 */
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
				 const enum hrtimer_mode mode)
{
	hrtimer_start_range_ns(timer, tim, 0, mode);
}
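
/*
 * A minimal one-shot sketch; struct my_dev, my_dev_timeout() and the 5ms
 * value are made up for illustration:
 *
 *	struct my_dev {
 *		struct hrtimer watchdog;
 *	};
 *
 *	static enum hrtimer_restart my_dev_timeout(struct hrtimer *t)
 *	{
 *		struct my_dev *dev = container_of(t, struct my_dev, watchdog);
 *
 *		// handle the timeout for dev here
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	hrtimer_setup(&dev->watchdog, my_dev_timeout, CLOCK_MONOTONIC,
 *		      HRTIMER_MODE_REL);
 *	hrtimer_start(&dev->watchdog, ms_to_ktime(5), HRTIMER_MODE_REL);
 */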

extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
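
/*
 * hrtimer_cancel() waits until a concurrently running callback has finished,
 * hrtimer_try_to_cancel() does not: it returns 0 if the timer was not active,
 * 1 if it was dequeued and -1 if the callback is currently executing.  A
 * cancel-and-wait loop can therefore be sketched roughly as follows (this
 * mirrors, but is not literally, the hrtimer_cancel() implementation):
 *
 *	int ret;
 *
 *	do {
 *		ret = hrtimer_try_to_cancel(timer);
 *		if (ret < 0)
 *			hrtimer_cancel_wait_running(timer);
 *	} while (ret < 0);
 */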

static inline void hrtimer_start_expires(struct hrtimer *timer,
					 enum hrtimer_mode mode)
{
	u64 delta;
	ktime_t soft, hard;

	soft = hrtimer_get_softexpires(timer);
	hard = hrtimer_get_expires(timer);
	delta = ktime_to_ns(ktime_sub(hard, soft));
	hrtimer_start_range_ns(timer, soft, delta, mode);
}

void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
				   enum hrtimer_mode mode);

static inline void hrtimer_restart(struct hrtimer *timer)
{
	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	return __hrtimer_get_remaining(timer, false);
}

extern u64 hrtimer_get_next_event(void);
extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);

extern bool hrtimer_active(const struct hrtimer *timer);

/**
 * hrtimer_is_queued - check whether the timer is on one of the queues
 * @timer:	Timer to check
 *
 * Returns: True if the timer is queued, false otherwise
 *
 * The function can be called locklessly, but it only provides a snapshot of
 * the state at the time of the call.
 */
static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
	/* The READ_ONCE() pairs with the update functions of timer->state */
	return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}

/*
 * Helper function to check whether the timer is currently running its
 * callback function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->base->running == timer;
}

/**
 * hrtimer_update_function - Update the timer's callback function
 * @timer:	Timer to update
 * @function:	New callback function
 *
 * Only safe to call if the timer is not enqueued. It can also be called from
 * the callback function itself, provided the timer is not also enqueued at
 * the same time (see the comments above HRTIMER_STATE_ENQUEUED).
 */
static inline void hrtimer_update_function(struct hrtimer *timer,
					   enum hrtimer_restart (*function)(struct hrtimer *))
{
#ifdef CONFIG_PROVE_LOCKING
	guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock);

	if (WARN_ON_ONCE(hrtimer_is_queued(timer)))
		return;

	if (WARN_ON_ONCE(!function))
		return;
#endif
	ACCESS_PRIVATE(timer, function) = function;
}
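
/*
 * One possible pattern (new_callback and expires are placeholders; the caller
 * must ensure nothing else can arm the timer in between):
 *
 *	hrtimer_cancel(timer);
 *	hrtimer_update_function(timer, new_callback);
 *	hrtimer_start(timer, expires, HRTIMER_MODE_ABS);
 */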

/* Forward an hrtimer so that it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);

/**
 * hrtimer_forward_now() - forward the timer expiry so it expires after now
 * @timer:	hrtimer to forward
 * @interval:	the interval to forward
 *
 * This is a variant of hrtimer_forward() which forwards the timer expiry past
 * the current time of the hrtimer clock base. See hrtimer_forward() for
 * details.
 */
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
				      ktime_t interval)
{
	return hrtimer_forward(timer, hrtimer_cb_get_time(timer), interval);
}
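
/*
 * The usual pattern for a periodic timer is to forward it from its own
 * callback and ask for a restart (the 10ms period is illustrative):
 *
 *	static enum hrtimer_restart my_periodic_cb(struct hrtimer *t)
 *	{
 *		// periodic work goes here
 *		hrtimer_forward_now(t, ms_to_ktime(10));
 *		return HRTIMER_RESTART;
 *	}
 *
 * The value returned by hrtimer_forward_now() is the number of overruns,
 * i.e. how many intervals the expiry had to be advanced to get past now.
 */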

/* Precise sleep: */

extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
			      const clockid_t clockid);

extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				    const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
					  u64 delta,
					  const enum hrtimer_mode mode,
					  clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
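
/*
 * The schedule_hrtimeout*() helpers expect the caller to set the task state
 * first; they put the task to sleep until the expiry time (with a slack of
 * delta nanoseconds for the _range variants) or an earlier wakeup.  A
 * relative-sleep sketch (delay_ns is a caller-supplied value):
 *
 *	ktime_t timeout = ns_to_ktime(delay_ns);
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule_hrtimeout(&timeout, HRTIMER_MODE_REL);
 *
 * schedule_hrtimeout() returns 0 when the timer expired and -EINTR when the
 * task was woken up earlier; the task state is TASK_RUNNING again on return.
 */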

/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);

/* Bootup initialization: */
extern void __init hrtimers_init(void);

/* Show pending timers: */
extern void sysrq_timer_list_show(void);

int hrtimers_prepare_cpu(unsigned int cpu);
int hrtimers_cpu_starting(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
int hrtimers_cpu_dying(unsigned int cpu);
#else
#define hrtimers_cpu_dying	NULL
#endif

#endif