xref: /linux/include/trace/events/timer.h (revision bd803783dfa7ddd5e1d44a6abfeee26fdc3a2db7)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM timer
4 
5 #if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_TIMER_H
7 
8 #include <linux/tracepoint.h>
9 #include <linux/hrtimer.h>
10 #include <linux/timer.h>
11 
/*
 * timer_class - shared event class for timer events that record only the
 * timer pointer. Used below by timer_init, timer_expire_exit and
 * timer_cancel. The pointer is traced as an opaque value and is never
 * dereferenced here.
 */
12 DECLARE_EVENT_CLASS(timer_class,
13 
14 	TP_PROTO(struct timer_list *timer),
15 
16 	TP_ARGS(timer),
17 
18 	TP_STRUCT__entry(
19 		__field( void *,	timer	)
20 	),
21 
22 	TP_fast_assign(
23 		__entry->timer	= timer;
24 	),
25 
26 	TP_printk("timer=%p", __entry->timer)
27 );
28 
29 /**
30  * timer_init - called when the timer is initialized
31  * @timer:	pointer to struct timer_list
 *
 * Records only the timer pointer; the payload layout is defined by
 * timer_class.
32  */
33 DEFINE_EVENT(timer_class, timer_init,
34 
35 	TP_PROTO(struct timer_list *timer),
36 
37 	TP_ARGS(timer)
38 );
39 
/*
 * Single-letter decoding of the timer flag bits shown in the timer_start
 * output: M = migrating, D = deferrable, P = pinned, I = IRQ-safe.
 * Multiple set bits are joined with '|'.
 */
40 #define decode_timer_flags(flags)			\
41 	__print_flags(flags, "|",			\
42 		{  TIMER_MIGRATING,	"M" },		\
43 		{  TIMER_DEFERRABLE,	"D" },		\
44 		{  TIMER_PINNED,	"P" },		\
45 		{  TIMER_IRQSAFE,	"I" })
46 
47 /**
48  * timer_start - called when the timer is started
49  * @timer:		pointer to struct timer_list
50  * @bucket_expiry:	the bucket expiry time
51  */
52 TRACE_EVENT(timer_start,
53 
54 	TP_PROTO(struct timer_list *timer,
55 		unsigned long bucket_expiry),
56 
57 	TP_ARGS(timer, bucket_expiry),
58 
59 	TP_STRUCT__entry(
60 		__field( void *,	timer		)
61 		__field( void *,	function	)
62 		__field( unsigned long,	expires		)
63 		__field( unsigned long,	bucket_expiry	)
64 		__field( unsigned long,	now		)
65 		__field( unsigned int,	flags		)
66 	),
67 
68 	TP_fast_assign(
69 		__entry->timer		= timer;
70 		__entry->function	= timer->function;
71 		__entry->expires	= timer->expires;
72 		__entry->bucket_expiry	= bucket_expiry;
		/* jiffies at enqueue time; used on output to derive timeout */
73 		__entry->now		= jiffies;
74 		__entry->flags		= timer->flags;
75 	),
76 
	/*
	 * flags is split for display: the low bits hold the CPU number
	 * (TIMER_CPUMASK), the wheel array index sits above TIMER_ARRAYSHIFT,
	 * and the remaining trace flags are decoded as single letters.
	 * timeout is printed signed (%ld) because expires may already be in
	 * the past relative to now.
	 */
77 	TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] bucket_expiry=%lu cpu=%u idx=%u flags=%s",
78 		  __entry->timer, __entry->function, __entry->expires,
79 		  (long)__entry->expires - __entry->now,
80 		  __entry->bucket_expiry, __entry->flags & TIMER_CPUMASK,
81 		  __entry->flags >> TIMER_ARRAYSHIFT,
82 		  decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK))
83 );
84 
85 /**
86  * timer_expire_entry - called immediately before the timer callback
87  * @timer:	pointer to struct timer_list
88  * @baseclk:	value of timer_base::clk when timer expires
89  *
90  * Allows to determine the timer latency.
91  */
92 TRACE_EVENT(timer_expire_entry,
93 
94 	TP_PROTO(struct timer_list *timer, unsigned long baseclk),
95 
96 	TP_ARGS(timer, baseclk),
97 
98 	TP_STRUCT__entry(
99 		__field( void *,	timer	)
100 		__field( unsigned long,	now	)
101 		__field( void *,	function)
102 		__field( unsigned long,	baseclk	)
103 	),
104 
105 	TP_fast_assign(
106 		__entry->timer		= timer;
		/* jiffies at callback entry; basis for the latency computation */
107 		__entry->now		= jiffies;
108 		__entry->function	= timer->function;
109 		__entry->baseclk	= baseclk;
110 	),
111 
112 	TP_printk("timer=%p function=%ps now=%lu baseclk=%lu",
113 		  __entry->timer, __entry->function, __entry->now,
114 		  __entry->baseclk)
115 );
116 
117 /**
118  * timer_expire_exit - called immediately after the timer callback returns
119  * @timer:	pointer to struct timer_list
120  *
121  * When used in combination with the timer_expire_entry tracepoint we can
122  * determine the runtime of the timer callback function.
123  *
 * The timer pointer is the key for pairing this event with the preceding
 * timer_expire_entry.
 *
124  * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
125  * be invalid. We solely track the pointer.
126  */
127 DEFINE_EVENT(timer_class, timer_expire_exit,
128 
129 	TP_PROTO(struct timer_list *timer),
130 
131 	TP_ARGS(timer)
132 );
133 
134 /**
135  * timer_cancel - called when the timer is canceled
136  * @timer:	pointer to struct timer_list
 *
 * Records only the timer pointer; the payload layout is defined by
 * timer_class.
137  */
138 DEFINE_EVENT(timer_class, timer_cancel,
139 
140 	TP_PROTO(struct timer_list *timer),
141 
142 	TP_ARGS(timer)
143 );
144 
/**
 * timer_base_idle - called when the idle state of a timer base changes
 * @is_idle:	the new idle state
 * @cpu:	CPU owning the timer base
 */
145 TRACE_EVENT(timer_base_idle,
146 
147 	TP_PROTO(bool is_idle, unsigned int cpu),
148 
149 	TP_ARGS(is_idle, cpu),
150 
151 	TP_STRUCT__entry(
152 		__field( bool,		is_idle	)
153 		__field( unsigned int,	cpu	)
154 	),
155 
156 	TP_fast_assign(
157 		__entry->is_idle	= is_idle;
158 		__entry->cpu		= cpu;
159 	),
160 
	/*
	 * NOTE(review): cpu is unsigned int but printed with %d — harmless
	 * for real CPU numbers; kept as-is to preserve the trace format.
	 */
161 	TP_printk("is_idle=%d cpu=%d",
162 		  __entry->is_idle, __entry->cpu)
163 );
164 
/*
 * Symbolic names for the clock ids an hrtimer can be based on, used in
 * the hrtimer_setup trace output.
 */
165 #define decode_clockid(type)						\
166 	__print_symbolic(type,						\
167 		{ CLOCK_REALTIME,	"CLOCK_REALTIME"	},	\
168 		{ CLOCK_MONOTONIC,	"CLOCK_MONOTONIC"	},	\
169 		{ CLOCK_BOOTTIME,	"CLOCK_BOOTTIME"	},	\
170 		{ CLOCK_TAI,		"CLOCK_TAI"		})
171 
/*
 * Human-readable decoding of enum hrtimer_mode for trace output.
 * Every ABS/REL combination with PINNED, SOFT and HARD is listed
 * explicitly because __print_symbolic matches whole values, not bits.
 */
172 #define decode_hrtimer_mode(mode)					\
173 	__print_symbolic(mode,						\
174 		{ HRTIMER_MODE_ABS,		"ABS"		},	\
175 		{ HRTIMER_MODE_REL,		"REL"		},	\
176 		{ HRTIMER_MODE_ABS_PINNED,	"ABS|PINNED"	},	\
177 		{ HRTIMER_MODE_REL_PINNED,	"REL|PINNED"	},	\
178 		{ HRTIMER_MODE_ABS_SOFT,	"ABS|SOFT"	},	\
179 		{ HRTIMER_MODE_REL_SOFT,	"REL|SOFT"	},	\
180 		{ HRTIMER_MODE_ABS_PINNED_SOFT,	"ABS|PINNED|SOFT" },	\
181 		{ HRTIMER_MODE_REL_PINNED_SOFT,	"REL|PINNED|SOFT" },	\
182 		{ HRTIMER_MODE_ABS_HARD,	"ABS|HARD" },		\
183 		{ HRTIMER_MODE_REL_HARD,	"REL|HARD" },		\
184 		{ HRTIMER_MODE_ABS_PINNED_HARD, "ABS|PINNED|HARD" },	\
185 		{ HRTIMER_MODE_REL_PINNED_HARD,	"REL|PINNED|HARD" })
186 
187 /**
188  * hrtimer_setup - called when the hrtimer is initialized
189  * @hrtimer:	pointer to struct hrtimer
190  * @clockid:	the hrtimers clock
191  * @mode:	the hrtimers mode
 *
 * clockid and mode are printed symbolically via decode_clockid() and
 * decode_hrtimer_mode() above.
192  */
193 TRACE_EVENT(hrtimer_setup,
194 
195 	TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
196 		 enum hrtimer_mode mode),
197 
198 	TP_ARGS(hrtimer, clockid, mode),
199 
200 	TP_STRUCT__entry(
201 		__field( void *,		hrtimer		)
202 		__field( clockid_t,		clockid		)
203 		__field( enum hrtimer_mode,	mode		)
204 	),
205 
206 	TP_fast_assign(
207 		__entry->hrtimer	= hrtimer;
208 		__entry->clockid	= clockid;
209 		__entry->mode		= mode;
210 	),
211 
212 	TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
213 		  decode_clockid(__entry->clockid),
214 		  decode_hrtimer_mode(__entry->mode))
215 );
216 
217 /**
218  * hrtimer_start - called when the hrtimer is started
219  * @hrtimer:	pointer to struct hrtimer
220  * @mode:	the hrtimers mode
221  * @was_armed:	Was armed when hrtimer_start*() was invoked
222  */
223 TRACE_EVENT(hrtimer_start,
224 
225 	TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode, bool was_armed),
226 
227 	TP_ARGS(hrtimer, mode, was_armed),
228 
229 	TP_STRUCT__entry(
230 		__field( void *,	hrtimer		)
231 		__field( void *,	function	)
232 		__field( s64,		expires		)
233 		__field( s64,		softexpires	)
234 		__field( enum hrtimer_mode,	mode	)
235 		__field( bool,		was_armed	)
236 	),
237 
238 	TP_fast_assign(
239 		__entry->hrtimer	= hrtimer;
		/* function is a private member of struct hrtimer */
240 		__entry->function	= ACCESS_PRIVATE(hrtimer, function);
241 		__entry->expires	= hrtimer_get_expires(hrtimer);
242 		__entry->softexpires	= hrtimer_get_softexpires(hrtimer);
243 		__entry->mode		= mode;
244 		__entry->was_armed	= was_armed;
245 	),
246 
	/* expires/softexpires are s64 ktime values, printed unsigned */
247 	TP_printk("hrtimer=%p function=%ps expires=%llu softexpires=%llu "
248 		  "mode=%s was_armed=%d", __entry->hrtimer, __entry->function,
249 		  (unsigned long long) __entry->expires,
250 		  (unsigned long long) __entry->softexpires,
251 		  decode_hrtimer_mode(__entry->mode), __entry->was_armed)
252 );
253 
254 /**
255  * hrtimer_expire_entry - called immediately before the hrtimer callback
256  * @hrtimer:	pointer to struct hrtimer
257  * @now:	variable which contains current time of the timers base.
258  *
259  * Allows to determine the timer latency.
260  */
261 TRACE_EVENT(hrtimer_expire_entry,
262 
263 	TP_PROTO(struct hrtimer *hrtimer, ktime_t now),
264 
265 	TP_ARGS(hrtimer, now),
266 
267 	TP_STRUCT__entry(
268 		__field( void *,	hrtimer	)
269 		__field( s64,		now	)
270 		__field( void *,	function)
271 	),
272 
273 	TP_fast_assign(
274 		__entry->hrtimer	= hrtimer;
275 		__entry->now		= now;
		/* function is a private member of struct hrtimer */
276 		__entry->function	= ACCESS_PRIVATE(hrtimer, function);
277 	),
278 
279 	TP_printk("hrtimer=%p function=%ps now=%llu",
280 		  __entry->hrtimer, __entry->function,
281 		  (unsigned long long) __entry->now)
282 );
283 
/*
 * hrtimer_class - shared event class for hrtimer events that record only
 * the hrtimer pointer. Used below by hrtimer_expire_exit and
 * hrtimer_cancel. The pointer is traced as an opaque value and is never
 * dereferenced here.
 */
284 DECLARE_EVENT_CLASS(hrtimer_class,
285 
286 	TP_PROTO(struct hrtimer *hrtimer),
287 
288 	TP_ARGS(hrtimer),
289 
290 	TP_STRUCT__entry(
291 		__field( void *,	hrtimer	)
292 	),
293 
294 	TP_fast_assign(
295 		__entry->hrtimer	= hrtimer;
296 	),
297 
298 	TP_printk("hrtimer=%p", __entry->hrtimer)
299 );
300 
301 /**
302  * hrtimer_expire_exit - called immediately after the hrtimer callback returns
303  * @hrtimer:	pointer to struct hrtimer
304  *
305  * When used in combination with the hrtimer_expire_entry tracepoint we can
306  * determine the runtime of the callback function.
 *
 * The hrtimer pointer is the key for pairing this event with the
 * preceding hrtimer_expire_entry.
307  */
308 DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit,
309 
310 	TP_PROTO(struct hrtimer *hrtimer),
311 
312 	TP_ARGS(hrtimer)
313 );
314 
315 /**
316  * hrtimer_cancel - called when the hrtimer is canceled
317  * @hrtimer:	pointer to struct hrtimer
 *
 * Records only the hrtimer pointer; the payload layout is defined by
 * hrtimer_class.
318  */
319 DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
320 
321 	TP_PROTO(struct hrtimer *hrtimer),
322 
323 	TP_ARGS(hrtimer)
324 );
325 
326 /**
327  * hrtimer_rearm - Invoked when the clockevent device is rearmed
328  * @next_event:	The next expiry time (CLOCK_MONOTONIC)
 * @deferred:	flag passed through by the caller (presumably: the
 *		reprogramming was deferred — verify at the call site)
329  */
330 TRACE_EVENT(hrtimer_rearm,
331 
332 	TP_PROTO(ktime_t next_event, bool deferred),
333 
334 	TP_ARGS(next_event, deferred),
335 
336 	TP_STRUCT__entry(
337 		__field( s64,		next_event	)
338 		__field( bool,		deferred	)
339 	),
340 
341 	TP_fast_assign(
342 		__entry->next_event	= next_event;
343 		__entry->deferred	= deferred;
344 	),
345 
346 	TP_printk("next_event=%llu deferred=%d",
347 		  (unsigned long long) __entry->next_event, __entry->deferred)
348 );
349 
350 /**
351  * itimer_state - called when itimer is started or canceled
352  * @which:	name of the interval timer
353  * @value:	the itimers value, itimer is canceled if value->it_value is
354  *		zero, otherwise it is started
355  * @expires:	the itimers expiry time
356  */
357 TRACE_EVENT(itimer_state,
358 
359 	TP_PROTO(int which, const struct itimerspec64 *const value,
360 		 unsigned long long expires),
361 
362 	TP_ARGS(which, value, expires),
363 
364 	TP_STRUCT__entry(
365 		__field(	int,			which		)
366 		__field(	unsigned long long,	expires		)
367 		__field(	long,			value_sec	)
368 		__field(	long,			value_nsec	)
369 		__field(	long,			interval_sec	)
370 		__field(	long,			interval_nsec	)
371 	),
372 
373 	TP_fast_assign(
374 		__entry->which		= which;
375 		__entry->expires	= expires;
376 		__entry->value_sec	= value->it_value.tv_sec;
377 		__entry->value_nsec	= value->it_value.tv_nsec;
378 		__entry->interval_sec	= value->it_interval.tv_sec;
379 		__entry->interval_nsec	= value->it_interval.tv_nsec;
380 	),
381 
	/*
	 * it_value/it_interval are displayed as "sec.usec": tv_nsec is
	 * converted to microseconds to form the 6-digit fractional part.
	 */
382 	TP_printk("which=%d expires=%llu it_value=%ld.%06ld it_interval=%ld.%06ld",
383 		  __entry->which, __entry->expires,
384 		  __entry->value_sec, __entry->value_nsec / NSEC_PER_USEC,
385 		  __entry->interval_sec, __entry->interval_nsec / NSEC_PER_USEC)
386 );
387 
388 /**
389  * itimer_expire - called when itimer expires
390  * @which:	type of the interval timer
391  * @pid:	pid of the process which owns the timer
392  * @now:	current time, used to calculate the latency of itimer
393  */
394 TRACE_EVENT(itimer_expire,
395 
396 	TP_PROTO(int which, struct pid *pid, unsigned long long now),
397 
398 	TP_ARGS(which, pid, now),
399 
400 	TP_STRUCT__entry(
401 		__field( int ,			which	)
402 		__field( pid_t,			pid	)
403 		__field( unsigned long long,	now	)
404 	),
405 
406 	TP_fast_assign(
407 		__entry->which	= which;
408 		__entry->now	= now;
		/* store the numeric pid, not the struct pid pointer */
409 		__entry->pid	= pid_nr(pid);
410 	),
411 
412 	TP_printk("which=%d pid=%d now=%llu", __entry->which,
413 		  (int) __entry->pid, __entry->now)
414 );
415 
416 #ifdef CONFIG_NO_HZ_COMMON
417 
/*
 * TICK_DEP_NAMES is an x-macro list that is expanded twice below: first
 * to register the TICK_DEP_BIT_* and TICK_DEP_MASK_* enums with the
 * trace infrastructure via TRACE_DEFINE_ENUM(), then to build the
 * symbol table consumed by show_tick_dep_name() in tick_stop's printk.
 */
418 #define TICK_DEP_NAMES					\
419 		tick_dep_mask_name(NONE)		\
420 		tick_dep_name(POSIX_TIMER)		\
421 		tick_dep_name(PERF_EVENTS)		\
422 		tick_dep_name(SCHED)			\
423 		tick_dep_name(CLOCK_UNSTABLE)		\
424 		tick_dep_name(RCU)			\
425 		tick_dep_name_end(RCU_EXP)
426 
427 #undef tick_dep_name
428 #undef tick_dep_mask_name
429 #undef tick_dep_name_end
430 
431 /* The MASK will convert to their bits and they need to be processed too */
432 #define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
433 	TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
434 #define tick_dep_name_end(sdep)  TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
435 	TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
436 /* NONE only has a mask defined for it */
437 #define tick_dep_mask_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
438 
439 TICK_DEP_NAMES
440 
441 #undef tick_dep_name
442 #undef tick_dep_mask_name
443 #undef tick_dep_name_end
444 
/* Second pass: emit { mask, "NAME" } pairs for __print_symbolic() */
445 #define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
446 #define tick_dep_mask_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
447 #define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep }
448 
449 #define show_tick_dep_name(val)				\
450 	__print_symbolic(val, TICK_DEP_NAMES)
451 
/**
 * tick_stop - called on an attempt to stop the periodic tick
 * @success:	non-zero when the tick could be stopped
 * @dependency:	TICK_DEP_MASK_* value, decoded by show_tick_dep_name()
 */
452 TRACE_EVENT(tick_stop,
453 
454 	TP_PROTO(int success, int dependency),
455 
456 	TP_ARGS(success, dependency),
457 
458 	TP_STRUCT__entry(
459 		__field( int ,		success	)
460 		__field( int ,		dependency )
461 	),
462 
463 	TP_fast_assign(
464 		__entry->success	= success;
465 		__entry->dependency	= dependency;
466 	),
467 
468 	TP_printk("success=%d dependency=%s",  __entry->success, \
469 			show_tick_dep_name(__entry->dependency))
470 );
471 #endif
472 
473 #endif /*  _TRACE_TIMER_H */
474 
475 /* This part must be outside protection */
476 #include <trace/define_trace.h>
477