/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM percpu

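/*
 * Tracepoints for the per-cpu memory allocator (mm/percpu.c).
 *
 * Like all trace event headers this file is read more than once:
 * TRACE_HEADER_MULTI_READ lets <trace/define_trace.h> re-include it to
 * generate the event code.
 *
 * Illustrative use from userspace, assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *	echo 1 > /sys/kernel/tracing/events/percpu/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */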
#if !defined(_TRACE_PERCPU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PERCPU_H

#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

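/*
 * percpu_alloc_percpu - a per-cpu allocation succeeded.
 *
 * Records the call site, whether the reserved region was used, whether the
 * allocation was atomic, the requested size and alignment, the chunk base
 * address and the offset within it, the returned __percpu pointer, the
 * number of bytes accounted for the allocation and the gfp flags.
 * Fired from the per-cpu allocator (pcpu_alloc() in mm/percpu.c) once a
 * request has been satisfied.
 */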
TRACE_EVENT(percpu_alloc_percpu,

	TP_PROTO(unsigned long call_site,
		 bool reserved, bool is_atomic, size_t size,
		 size_t align, void *base_addr, int off,
		 void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, reserved, is_atomic, size, align, base_addr, off,
		ptr, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,		call_site	)
		__field(	bool,			reserved	)
		__field(	bool,			is_atomic	)
		__field(	size_t,			size		)
		__field(	size_t,			align		)
		__field(	void *,			base_addr	)
		__field(	int,			off		)
		__field(	void __percpu *,	ptr		)
		__field(	size_t,			bytes_alloc	)
		__field(	unsigned long,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->reserved	= reserved;
		__entry->is_atomic	= is_atomic;
		__entry->size		= size;
		__entry->align		= align;
		__entry->base_addr	= base_addr;
		__entry->off		= off;
		__entry->ptr		= ptr;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
	),

	TP_printk("call_site=%pS reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p bytes_alloc=%zu gfp_flags=%s",
		  (void *)__entry->call_site,
		  __entry->reserved, __entry->is_atomic,
		  __entry->size, __entry->align,
		  __entry->base_addr, __entry->off, __entry->ptr,
		  __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags))
);

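/*
 * percpu_free_percpu - a per-cpu area is being freed.
 *
 * Records the chunk base address, the offset of the area within the chunk
 * and the __percpu pointer being freed. Fired from free_percpu() in
 * mm/percpu.c.
 */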
TRACE_EVENT(percpu_free_percpu,

	TP_PROTO(void *base_addr, int off, void __percpu *ptr),

	TP_ARGS(base_addr, off, ptr),

	TP_STRUCT__entry(
		__field(	void *,			base_addr	)
		__field(	int,			off		)
		__field(	void __percpu *,	ptr		)
	),

	TP_fast_assign(
		__entry->base_addr	= base_addr;
		__entry->off		= off;
		__entry->ptr		= ptr;
	),

	TP_printk("base_addr=%p off=%d ptr=%p",
		__entry->base_addr, __entry->off, __entry->ptr)
);

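/*
 * percpu_alloc_percpu_fail - a per-cpu allocation failed.
 *
 * Records whether the reserved region was requested, whether the allocation
 * was atomic, and the requested size and alignment, so failures can be
 * correlated with the request that triggered them.
 */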
TRACE_EVENT(percpu_alloc_percpu_fail,

	TP_PROTO(bool reserved, bool is_atomic, size_t size, size_t align),

	TP_ARGS(reserved, is_atomic, size, align),

	TP_STRUCT__entry(
		__field(	bool,	reserved	)
		__field(	bool,	is_atomic	)
		__field(	size_t,	size		)
		__field(	size_t,	align		)
	),

	TP_fast_assign(
		__entry->reserved	= reserved;
		__entry->is_atomic	= is_atomic;
		__entry->size		= size;
		__entry->align		= align;
	),

	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu",
		  __entry->reserved, __entry->is_atomic,
		  __entry->size, __entry->align)
);

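/*
 * percpu_create_chunk - a new per-cpu chunk was created.
 *
 * Records the base address of the new chunk. Together with
 * percpu_destroy_chunk this tracks the allocator's chunk population.
 */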
TRACE_EVENT(percpu_create_chunk,

	TP_PROTO(void *base_addr),

	TP_ARGS(base_addr),

	TP_STRUCT__entry(
		__field(	void *,	base_addr	)
	),

	TP_fast_assign(
		__entry->base_addr	= base_addr;
	),

	TP_printk("base_addr=%p", __entry->base_addr)
);

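/*
 * percpu_destroy_chunk - a per-cpu chunk is being destroyed.
 *
 * Records the base address of the chunk being torn down.
 */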
TRACE_EVENT(percpu_destroy_chunk,

	TP_PROTO(void *base_addr),

	TP_ARGS(base_addr),

	TP_STRUCT__entry(
		__field(	void *,	base_addr	)
	),

	TP_fast_assign(
		__entry->base_addr	= base_addr;
	),

	TP_printk("base_addr=%p", __entry->base_addr)
);

#endif /* _TRACE_PERCPU_H */

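/* This must stay outside the _TRACE_PERCPU_H guard */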
#include <trace/define_trace.h>