xref: /linux/include/trace/events/xdp.h (revision 7a92fc8b4d20680e4c20289a670d8fca2d1f2c1b)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM xdp
4 
5 #if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_XDP_H
7 
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/tracepoint.h>
11 #include <linux/bpf.h>
12 #include <net/xdp.h>
13 
/* Single authoritative list of XDP return codes; FN() is expanded once
 * per action so the enum export and the symbolic-print table below
 * cannot drift out of sync.
 */
#define __XDP_ACT_MAP(FN)	\
	FN(ABORTED)		\
	FN(DROP)		\
	FN(PASS)		\
	FN(TX)			\
	FN(REDIRECT)

/* Make each XDP_* enum value known to the tracing core so user space
 * can resolve the numeric values used in the print formats.
 */
#define __XDP_ACT_TP_FN(x)	\
	TRACE_DEFINE_ENUM(XDP_##x);
/* One { numeric value, "NAME" } pair for __print_symbolic() */
#define __XDP_ACT_SYM_FN(x)	\
	{ XDP_##x, #x },
/* Terminated symbol table translating action codes to action names */
#define __XDP_ACT_SYM_TAB	\
	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)
28 
/* Records an XDP exception: the id of the BPF program involved, the
 * action code it returned (printed symbolically via __XDP_ACT_SYM_TAB)
 * and the ifindex of the device the program is attached to.
 */
TRACE_EVENT(xdp_exception,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp, u32 act),

	TP_ARGS(dev, xdp, act),

	TP_STRUCT__entry(
		__field(int, prog_id)
		__field(u32, act)
		__field(int, ifindex)
	),

	TP_fast_assign(
		__entry->prog_id	= xdp->aux->id;
		__entry->act		= act;
		__entry->ifindex	= dev->ifindex;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex)
);
53 
/* Records one bulk XDP_TX operation on a device: frames sent, frames
 * dropped and the error code.  The act field is hard-wired to XDP_TX
 * so the event carries an action like the other XDP events.
 */
TRACE_EVENT(xdp_bulk_tx,

	TP_PROTO(const struct net_device *dev,
		 int sent, int drops, int err),

	TP_ARGS(dev, sent, drops, err),

	TP_STRUCT__entry(
		__field(int, ifindex)
		__field(u32, act)
		__field(int, drops)
		__field(int, sent)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->ifindex	= dev->ifindex;
		__entry->act		= XDP_TX;
		__entry->drops		= drops;
		__entry->sent		= sent;
		__entry->err		= err;
	),

	TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
		  __entry->ifindex,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops, __entry->err)
);
82 
/* Local mirror of the devmap entry type, exposing only the leading
 * net_device pointer so the redirect tracepoint can read the target
 * ifindex from an opaque devmap pointer without including the private
 * devmap headers.
 * NOTE(review): this assumes the real devmap entry starts with a
 * struct net_device * — confirm against kernel/bpf/devmap.c if that
 * struct changes.
 */
#ifndef __DEVMAP_OBJ_TYPE
#define __DEVMAP_OBJ_TYPE
struct _bpf_dtab_netdev {
	struct net_device *dev;
};
#endif /* __DEVMAP_OBJ_TYPE */
89 
/* Common layout for the xdp_redirect* events.
 *
 * @tgt is an opaque pointer to the redirect target; its meaning depends
 * on @map_type.  For the non-map (plain ifindex) redirect path the
 * callers pass map_type == BPF_MAP_TYPE_UNSPEC and map_id == INT_MAX
 * with the destination ifindex in @index (see the _trace_xdp_redirect*
 * wrapper macros below), which is decoded back in TP_fast_assign.
 */
DECLARE_EVENT_CLASS(xdp_redirect_template,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),

	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),

	TP_STRUCT__entry(
		__field(int, prog_id)
		__field(u32, act)
		__field(int, ifindex)
		__field(int, err)
		__field(int, to_ifindex)
		__field(u32, map_id)
		__field(int, map_index)
	),

	TP_fast_assign(
		u32 ifindex = 0, map_index = index;

		if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
			/* Leave to_ifindex at 0 for a broadcast redirect,
			 * as tgt will be NULL in that case.
			 */
			if (tgt)
				ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
		} else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
			/* Non-map redirect: index holds the destination
			 * ifindex, not a map index.
			 */
			ifindex = index;
			map_index = 0;
		}

		__entry->prog_id	= xdp->aux->id;
		__entry->act		= XDP_REDIRECT;
		__entry->ifindex	= dev->ifindex;
		__entry->err		= err;
		__entry->to_ifindex	= ifindex;
		__entry->map_id		= map_id;
		__entry->map_index	= map_index;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
		  " map_id=%d map_index=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex, __entry->to_ifindex,
		  __entry->err, __entry->map_id, __entry->map_index)
);
140 
/* Successful redirect; the _trace_xdp_redirect* wrappers invoke this
 * variant with err == 0.
 */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
149 
/* Failed redirect; err carries the error code supplied by the caller */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
158 
/* Convenience wrappers for the two redirect paths.
 *
 * The non-map variants tag the event with BPF_MAP_TYPE_UNSPEC and
 * map_id == INT_MAX; xdp_redirect_template recognizes that combination
 * and reports @to as the destination ifindex rather than a map index.
 * The map variants pass the real target pointer, map type, id and
 * index through unchanged.
 */
#define _trace_xdp_redirect(dev, xdp, to)						\
	 trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

#define _trace_xdp_redirect_err(dev, xdp, to, err)					\
	 trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index) \
	 trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)

#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
	 trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
170 
/* No longer invoked by the kernel, but kept around so as not to break
 * old user-space programs that attach to this trace event.
 */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
180 
/* Like xdp_redirect_map: no longer used, kept for old programs */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
189 
/* Per-batch statistics from a cpumap kthread: the map id, the current
 * CPU, counts of processed and dropped frames, the caller-supplied
 * sched value, and the XDP_PASS/XDP_DROP/XDP_REDIRECT breakdown taken
 * from @xdp_stats.
 * NOTE(review): sched looks like a "was rescheduled" flag — confirm at
 * the call site in kernel/bpf/cpumap.c.
 */
TRACE_EVENT(xdp_cpumap_kthread,

	TP_PROTO(int map_id, unsigned int processed,  unsigned int drops,
		 int sched, struct xdp_cpumap_stats *xdp_stats),

	TP_ARGS(map_id, processed, drops, sched, xdp_stats),

	TP_STRUCT__entry(
		__field(int, map_id)
		__field(u32, act)
		__field(int, cpu)
		__field(unsigned int, drops)
		__field(unsigned int, processed)
		__field(int, sched)
		__field(unsigned int, xdp_pass)
		__field(unsigned int, xdp_drop)
		__field(unsigned int, xdp_redirect)
	),

	TP_fast_assign(
		__entry->map_id		= map_id;
		__entry->act		= XDP_REDIRECT;
		__entry->cpu		= smp_processor_id();
		__entry->drops		= drops;
		__entry->processed	= processed;
		__entry->sched	= sched;
		__entry->xdp_pass	= xdp_stats->pass;
		__entry->xdp_drop	= xdp_stats->drop;
		__entry->xdp_redirect	= xdp_stats->redirect;
	),

	TP_printk("kthread"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " sched=%d"
		  " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->sched,
		  __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
);
232 
/* Records a cpumap enqueue batch: map id, the CPU doing the enqueue,
 * counts of frames processed and dropped, and the destination CPU.
 */
TRACE_EVENT(xdp_cpumap_enqueue,

	TP_PROTO(int map_id, unsigned int processed,  unsigned int drops,
		 int to_cpu),

	TP_ARGS(map_id, processed, drops, to_cpu),

	TP_STRUCT__entry(
		__field(int, map_id)
		__field(u32, act)
		__field(int, cpu)
		__field(unsigned int, drops)
		__field(unsigned int, processed)
		__field(int, to_cpu)
	),

	TP_fast_assign(
		__entry->map_id		= map_id;
		__entry->act		= XDP_REDIRECT;
		__entry->cpu		= smp_processor_id();
		__entry->drops		= drops;
		__entry->processed	= processed;
		__entry->to_cpu		= to_cpu;
	),

	TP_printk("enqueue"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " to_cpu=%d",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->to_cpu)
);
267 
/* Records an ndo_xdp_xmit bulk transmit between two devices: source
 * and destination ifindex, frames sent, frames dropped and the error
 * code.  act is fixed to XDP_REDIRECT.
 */
TRACE_EVENT(xdp_devmap_xmit,

	TP_PROTO(const struct net_device *from_dev,
		 const struct net_device *to_dev,
		 int sent, int drops, int err),

	TP_ARGS(from_dev, to_dev, sent, drops, err),

	TP_STRUCT__entry(
		__field(int, from_ifindex)
		__field(u32, act)
		__field(int, to_ifindex)
		__field(int, drops)
		__field(int, sent)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->from_ifindex	= from_dev->ifindex;
		__entry->act		= XDP_REDIRECT;
		__entry->to_ifindex	= to_dev->ifindex;
		__entry->drops		= drops;
		__entry->sent		= sent;
		__entry->err		= err;
	),

	TP_printk("ndo_xdp_xmit"
		  " from_ifindex=%d to_ifindex=%d action=%s"
		  " sent=%d drops=%d"
		  " err=%d",
		  __entry->from_ifindex, __entry->to_ifindex,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops,
		  __entry->err)
);
303 
304 /* Expect users already include <net/xdp.h>, but not xdp_priv.h */
305 #include <net/xdp_priv.h>
306 
/* XDP memory-model types; same FN-map pattern as __XDP_ACT_MAP above
 * so the enum export and symbol table share one list.
 */
#define __MEM_TYPE_MAP(FN)	\
	FN(PAGE_SHARED)		\
	FN(PAGE_ORDER0)		\
	FN(PAGE_POOL)		\
	FN(XSK_BUFF_POOL)

/* Export each MEM_TYPE_* value to the tracing core */
#define __MEM_TYPE_TP_FN(x)	\
	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
/* One { numeric value, "NAME" } pair for __print_symbolic() */
#define __MEM_TYPE_SYM_FN(x)	\
	{ MEM_TYPE_##x, #x },
/* Terminated symbol table mapping memory types to their names */
#define __MEM_TYPE_SYM_TAB	\
	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
320 
/* Records an xdp_mem_allocator being disconnected: its mem id, memory
 * type (printed symbolically) and the allocator pointer.  The xa
 * pointer itself is stored in the entry but not printed.
 */
TRACE_EVENT(mem_disconnect,

	TP_PROTO(const struct xdp_mem_allocator *xa),

	TP_ARGS(xa),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *,	xa)
		__field(u32,		mem_id)
		__field(u32,		mem_type)
		__field(const void *,	allocator)
	),

	TP_fast_assign(
		__entry->xa		= xa;
		__entry->mem_id		= xa->mem.id;
		__entry->mem_type	= xa->mem.type;
		__entry->allocator	= xa->allocator;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator
	)
);
347 
/* Records an xdp_mem_allocator being connected to an RX queue: mem id,
 * memory type, allocator pointer, and the ifindex of the queue's
 * device.  The raw xa/rxq pointers are stored but not printed.
 */
TRACE_EVENT(mem_connect,

	TP_PROTO(const struct xdp_mem_allocator *xa,
		 const struct xdp_rxq_info *rxq),

	TP_ARGS(xa, rxq),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *,	xa)
		__field(u32,		mem_id)
		__field(u32,		mem_type)
		__field(const void *,	allocator)
		__field(const struct xdp_rxq_info *,		rxq)
		__field(int,		ifindex)
	),

	TP_fast_assign(
		__entry->xa		= xa;
		__entry->mem_id		= xa->mem.id;
		__entry->mem_type	= xa->mem.type;
		__entry->allocator	= xa->allocator;
		__entry->rxq		= rxq;
		__entry->ifindex	= rxq->dev->ifindex;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p"
		  " ifindex=%d",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator,
		  __entry->ifindex
	)
);
381 
/* Records a failed return of a page to its XDP memory allocator: the
 * page pointer plus the id and type from the xdp_mem_info.
 */
TRACE_EVENT(mem_return_failed,

	TP_PROTO(const struct xdp_mem_info *mem,
		 const struct page *page),

	TP_ARGS(mem, page),

	TP_STRUCT__entry(
		__field(const struct page *,	page)
		__field(u32,		mem_id)
		__field(u32,		mem_type)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->mem_id		= mem->id;
		__entry->mem_type	= mem->type;
	),

	TP_printk("mem_id=%d mem_type=%s page=%p",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->page
	)
);
407 
/* Records the error message produced when attaching an XDP link fails;
 * the message is copied into the ring buffer as a dynamic string.
 */
TRACE_EVENT(bpf_xdp_link_attach_failed,

	TP_PROTO(const char *msg),

	TP_ARGS(msg),

	TP_STRUCT__entry(
		__string(msg, msg)
	),

	TP_fast_assign(
		__assign_str(msg, msg);
	),

	TP_printk("errmsg=%s", __get_str(msg))
);
424 
425 #endif /* _TRACE_XDP_H */
426 
427 #include <trace/define_trace.h>
428