/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xdp

#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XDP_H

#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/tracepoint.h>
#include <linux/bpf.h>

/*
 * X-macro listing the XDP program verdicts (enum xdp_action).  Running a
 * callback FN over this single list lets the helpers below generate both
 * the TRACE_DEFINE_ENUM() declarations (so user-space trace tools can
 * resolve the numeric values) and the symbolic-name table consumed by
 * __print_symbolic() in the TP_printk() format strings.
 */
#define __XDP_ACT_MAP(FN) \
	FN(ABORTED) \
	FN(DROP) \
	FN(PASS) \
	FN(TX) \
	FN(REDIRECT)

/* Emit one TRACE_DEFINE_ENUM() per XDP action. */
#define __XDP_ACT_TP_FN(x) \
	TRACE_DEFINE_ENUM(XDP_##x);
/* Emit one { value, "NAME" } pair per XDP action. */
#define __XDP_ACT_SYM_FN(x) \
	{ XDP_##x, #x },
/* Complete action symbol table, terminated by the { -1, NULL } sentinel. */
#define __XDP_ACT_SYM_TAB \
	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)

/*
 * Tracepoint for XDP exceptions: records which program (by id) returned
 * which action on which ingress device.  The conditions under which this
 * fires are determined by the call sites (drivers/stack), not here.
 */
TRACE_EVENT(xdp_exception,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp, u32 act),

	TP_ARGS(dev, xdp, act),

	TP_STRUCT__entry(
		__field(int, prog_id)
		__field(u32, act)
		__field(int, ifindex)
	),

	TP_fast_assign(
		__entry->prog_id = xdp->aux->id;
		__entry->act = act;
		__entry->ifindex = dev->ifindex;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex)
);

/*
 * Tracepoint for bulk XDP_TX transmission: per-device counts of frames
 * sent and dropped plus an error code.  The act field is hard-wired to
 * XDP_TX so the symbolic "action=" output lines up with the other events.
 */
TRACE_EVENT(xdp_bulk_tx,

	TP_PROTO(const struct net_device *dev,
		 int sent, int drops, int err),

	TP_ARGS(dev, sent, drops, err),

	TP_STRUCT__entry(
		__field(int, ifindex)
		__field(u32, act)
		__field(int, drops)
		__field(int, sent)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->ifindex = dev->ifindex;
		__entry->act = XDP_TX;
		__entry->drops = drops;
		__entry->sent = sent;
		__entry->err = err;
	),

	TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
		  __entry->ifindex,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops, __entry->err)
);

#ifndef __DEVMAP_OBJ_TYPE
#define __DEVMAP_OBJ_TYPE
/*
 * Minimal local view of a devmap entry: only the leading net_device
 * pointer is needed here, so the xdp_redirect_template below can read
 * ->dev->ifindex through the opaque redirect target pointer.
 * NOTE(review): this assumes the first member of the real devmap value
 * struct is the net_device pointer — confirm against the devmap
 * implementation if that struct ever changes.
 */
struct _bpf_dtab_netdev {
	struct net_device *dev;
};
#endif /* __DEVMAP_OBJ_TYPE */

/*
 * Common event class for the xdp_redirect* tracepoints.
 *
 * @tgt is an opaque pointer to the redirect destination; its meaning
 * depends on @map_type.  @index is the map index — except for the
 * map-less wrapper macros below, which pass the destination ifindex in
 * that slot.  See TP_fast_assign for how the cases are decoded.
 */
DECLARE_EVENT_CLASS(xdp_redirect_template,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),

	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),

	TP_STRUCT__entry(
		__field(int, prog_id)
		__field(u32, act)
		__field(int, ifindex)
		__field(int, err)
		__field(int, to_ifindex)
		__field(u32, map_id)
		__field(int, map_index)
	),

	TP_fast_assign(
		u32 ifindex = 0, map_index = index;

		if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
			/* Devmap target: destination ifindex lives behind tgt. */
			ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
		} else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
			/*
			 * Map-less redirect (see _trace_xdp_redirect*
			 * below): the destination ifindex was passed in
			 * through "index" and there is no map index.
			 */
			ifindex = index;
			map_index = 0;
		}

		__entry->prog_id = xdp->aux->id;
		__entry->act = XDP_REDIRECT;
		__entry->ifindex = dev->ifindex;
		__entry->err = err;
		__entry->to_ifindex = ifindex;
		__entry->map_id = map_id;
		__entry->map_index = map_index;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
		  " map_id=%d map_index=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex, __entry->to_ifindex,
		  __entry->err, __entry->map_id, __entry->map_index)
);

/* Successful redirect (the wrappers below pass err == 0). */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

/* Failed redirect: same record layout, carries the error code. */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

/*
 * Map-less redirect helpers: no target object (tgt == NULL), flagged by
 * BPF_MAP_TYPE_UNSPEC + map_id == INT_MAX, with the destination ifindex
 * travelling in the index argument (decoded in TP_fast_assign above).
 */
#define _trace_xdp_redirect(dev, xdp, to) \
	trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

#define _trace_xdp_redirect_err(dev, xdp, to, err) \
	trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

/* Map-based redirect helpers: real target plus map type/id/index. */
#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index) \
	trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)

#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
	trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)

/* not used anymore, but kept around so as not to break old programs */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

/*
 * Emitted from the cpumap kthread: counts of frames processed and
 * dropped, whether the kthread scheduled (sched), and the per-verdict
 * breakdown copied out of struct xdp_cpumap_stats.  The act field is
 * hard-wired to XDP_REDIRECT (cpumap is a redirect target).
 */
TRACE_EVENT(xdp_cpumap_kthread,

	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
		 int sched, struct xdp_cpumap_stats *xdp_stats),

	TP_ARGS(map_id, processed, drops, sched, xdp_stats),

	TP_STRUCT__entry(
		__field(int, map_id)
		__field(u32, act)
		__field(int, cpu)
		__field(unsigned int, drops)
		__field(unsigned int, processed)
		__field(int, sched)
		__field(unsigned int, xdp_pass)
		__field(unsigned int, xdp_drop)
		__field(unsigned int, xdp_redirect)
	),

	TP_fast_assign(
		__entry->map_id = map_id;
		__entry->act = XDP_REDIRECT;
		/* CPU the kthread is running on when the event fires. */
		__entry->cpu = smp_processor_id();
		__entry->drops = drops;
		__entry->processed = processed;
		__entry->sched = sched;
		__entry->xdp_pass = xdp_stats->pass;
		__entry->xdp_drop = xdp_stats->drop;
		__entry->xdp_redirect = xdp_stats->redirect;
	),

	TP_printk("kthread"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " sched=%d"
		  " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->sched,
		  __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
);

/*
 * Enqueue side of cpumap: frames handed to a remote CPU's queue
 * (to_cpu), with processed/dropped counts.  cpu is the CPU doing the
 * enqueue; act is again fixed to XDP_REDIRECT.
 */
TRACE_EVENT(xdp_cpumap_enqueue,

	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
		 int to_cpu),

	TP_ARGS(map_id, processed, drops, to_cpu),

	TP_STRUCT__entry(
		__field(int, map_id)
		__field(u32, act)
		__field(int, cpu)
		__field(unsigned int, drops)
		__field(unsigned int, processed)
		__field(int, to_cpu)
	),

	TP_fast_assign(
		__entry->map_id = map_id;
		__entry->act = XDP_REDIRECT;
		__entry->cpu = smp_processor_id();
		__entry->drops = drops;
		__entry->processed = processed;
		__entry->to_cpu = to_cpu;
	),

	TP_printk("enqueue"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " to_cpu=%d",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->to_cpu)
);

/*
 * Devmap transmit (ndo_xdp_xmit): frames forwarded from one device to
 * another, with sent/dropped counts and an error code.
 */
TRACE_EVENT(xdp_devmap_xmit,

	TP_PROTO(const struct net_device *from_dev,
		 const struct net_device *to_dev,
		 int sent, int drops, int err),

	TP_ARGS(from_dev, to_dev, sent, drops, err),

	TP_STRUCT__entry(
		__field(int, from_ifindex)
		__field(u32, act)
		__field(int, to_ifindex)
		__field(int, drops)
		__field(int, sent)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->from_ifindex = from_dev->ifindex;
		__entry->act = XDP_REDIRECT;
		__entry->to_ifindex = to_dev->ifindex;
		__entry->drops = drops;
		__entry->sent = sent;
		__entry->err = err;
	),

	TP_printk("ndo_xdp_xmit"
		  " from_ifindex=%d to_ifindex=%d action=%s"
		  " sent=%d drops=%d"
		  " err=%d",
		  __entry->from_ifindex, __entry->to_ifindex,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops,
		  __entry->err)
);

/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
#include <net/xdp_priv.h>

/*
 * Same X-macro pattern as __XDP_ACT_MAP above, but for the xdp_mem_type
 * values used by the mem_* events below.
 */
#define __MEM_TYPE_MAP(FN) \
	FN(PAGE_SHARED) \
	FN(PAGE_ORDER0) \
	FN(PAGE_POOL) \
	FN(XSK_BUFF_POOL)

#define __MEM_TYPE_TP_FN(x) \
	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
#define __MEM_TYPE_SYM_FN(x) \
	{ MEM_TYPE_##x, #x },
/* Symbol table with a { -1, 0 } sentinel (0 doubles as NULL here). */
#define __MEM_TYPE_SYM_TAB \
	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)

/*
 * Records an xdp_mem_allocator being disconnected: its id, memory type
 * and the allocator pointer (plus the allocator object itself for
 * correlation).
 */
TRACE_EVENT(mem_disconnect,

	TP_PROTO(const struct xdp_mem_allocator *xa),

	TP_ARGS(xa),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *, xa)
		__field(u32, mem_id)
		__field(u32, mem_type)
		__field(const void *, allocator)
	),

	TP_fast_assign(
		__entry->xa = xa;
		__entry->mem_id = xa->mem.id;
		__entry->mem_type = xa->mem.type;
		__entry->allocator = xa->allocator;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator
	)
);

/*
 * Records an xdp_mem_allocator being connected to an RX queue: the
 * allocator identity plus the queue's device ifindex.
 */
TRACE_EVENT(mem_connect,

	TP_PROTO(const struct xdp_mem_allocator *xa,
		 const struct xdp_rxq_info *rxq),

	TP_ARGS(xa, rxq),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *, xa)
		__field(u32, mem_id)
		__field(u32, mem_type)
		__field(const void *, allocator)
		__field(const struct xdp_rxq_info *, rxq)
		__field(int, ifindex)
	),

	TP_fast_assign(
		__entry->xa = xa;
		__entry->mem_id = xa->mem.id;
		__entry->mem_type = xa->mem.type;
		__entry->allocator = xa->allocator;
		__entry->rxq = rxq;
		__entry->ifindex = rxq->dev->ifindex;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p"
		  " ifindex=%d",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator,
		  __entry->ifindex
	)
);

/*
 * Records a failure to return a page to its memory allocator: the
 * xdp_mem_info identity and the page pointer involved.
 */
TRACE_EVENT(mem_return_failed,

	TP_PROTO(const struct xdp_mem_info *mem,
		 const struct page *page),

	TP_ARGS(mem, page),

	TP_STRUCT__entry(
		__field(const struct page *, page)
		__field(u32, mem_id)
		__field(u32, mem_type)
	),

	TP_fast_assign(
		__entry->page = page;
		__entry->mem_id = mem->id;
		__entry->mem_type = mem->type;
	),

	TP_printk("mem_id=%d mem_type=%s page=%p",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->page
	)
);

#endif /* _TRACE_XDP_H */

#include <trace/define_trace.h>