/linux/include/net/

xdp.h
     91  static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
     93  	return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
     96  static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
     98  	xdp->flags |= XDP_FLAGS_HAS_FRAGS;
    101  static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
    103  	xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
    106  static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
    108  	return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
    111  static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
    113  	xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
    [all …]
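The helpers above gate multi-buffer (frags) handling on an xdp_buff. A minimal sketch of how a driver RX path might use them when appending a fragment; my_dev_add_rx_frag() and its surrounding context are hypothetical, and skb_frag_fill_page_desc() assumes a recent kernel:

```c
#include <linux/skbuff.h>
#include <net/xdp.h>

/* Hypothetical driver helper: append one RX fragment to a
 * multi-buffer xdp_buff, setting the frags flag on the first one. */
static int my_dev_add_rx_frag(struct xdp_buff *xdp, struct page *page,
			      u32 offset, u32 len)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	if (!xdp_buff_has_frags(xdp)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(xdp);
	}

	if (sinfo->nr_frags == MAX_SKB_FRAGS)
		return -ENOMEM;

	skb_frag_fill_page_desc(&sinfo->frags[sinfo->nr_frags++],
				page, offset, len);
	sinfo->xdp_frags_size += len;

	/* Propagate pfmemalloc so an eventual skb inherits it. */
	if (page_is_pfmemalloc(page))
		xdp_buff_set_frag_pfmemalloc(xdp);
	return 0;
}
```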
xdp_sock_drv.h
     65  	return pool->heads[0].xdp.rxq->napi_id;
     85  static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
     87  	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
     92  static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
     94  	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
    110  static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
    112  	return xp_alloc_batch(pool, xdp, max);
    120  static inline void xsk_buff_free(struct xdp_buff *xdp)
    139  xsk_buff_add_frag(struct xdp_buff *xdp)
    179  xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
    222  xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
    328  xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
    333  xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
    348  xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
    358  xsk_buff_free(struct xdp_buff *xdp)
    362  xsk_buff_add_frag(struct xdp_buff *xdp)
    380  xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
    405  xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
    [all …]
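xsk_buff_alloc_batch() and xsk_buff_xdp_get_dma() are the pool-facing half of a zero-copy RX refill. A sketch under assumed driver types; struct my_rx_ring, its xdp_bufs cache, and its desc array are hypothetical stand-ins for real ring state:

```c
#include <net/xdp_sock_drv.h>

static u32 my_ring_refill_zc(struct my_rx_ring *ring,
			     struct xsk_buff_pool *pool, u32 count)
{
	struct xdp_buff **xdp = &ring->xdp_bufs[ring->next_to_use];
	u32 nb, i;

	/* One batched allocation instead of count single allocs. */
	nb = xsk_buff_alloc_batch(pool, xdp, count);
	for (i = 0; i < nb; i++) {
		dma_addr_t dma = xsk_buff_xdp_get_dma(xdp[i]);

		ring->desc[ring->next_to_use + i].addr = cpu_to_le64(dma);
	}
	return nb;	/* may be < count if the fill queue ran dry */
}
```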
/linux/Documentation/bpf/

redirect.rst
     25  :doc: xdp redirect
     29  those that do, not all of them support non-linear frames. Non-linear xdp
     45  sudo bpftrace -e 'tracepoint:xdp:* { @cnt[probe] = count(); }'
     49  @cnt[tracepoint:xdp:mem_connect]: 18
     50  @cnt[tracepoint:xdp:mem_disconnect]: 18
     51  @cnt[tracepoint:xdp:xdp_exception]: 19605
     52  @cnt[tracepoint:xdp:xdp_devmap_xmit]: 1393604
     53  @cnt[tracepoint:xdp:xdp_redirect]: 22292200
     56  The various xdp tracepoints can be found in ``source/include/trace/events/xdp.h``
     64  'tracepoint:xdp:xdp_redirect*_err {@redir_errno[-args->err] = count();}
    [all …]
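For reference, a minimal sketch of the program side that generates the xdp_redirect/xdp_devmap_xmit events counted above: a one-entry devmap and an unconditional bpf_redirect_map(). The map name and sizing are placeholders:

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_devmap(struct xdp_md *ctx)
{
	/* The lower bits of flags are the verdict on a lookup miss;
	 * XDP_ABORTED fires the xdp_exception tracepoint counted by
	 * the bpftrace one-liner above. */
	return bpf_redirect_map(&tx_port, 0, XDP_ABORTED);
}

char _license[] SEC("license") = "GPL";
```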
/linux/tools/testing/selftests/bpf/progs/

xdp_features.c
     65  xdp_process_echo_packet(struct xdp_md *xdp, bool dut)
     67  	void *data_end = (void *)(long)xdp->data_end;
     68  	void *data = (void *)(long)xdp->data;
    135  xdp_update_stats(struct xdp_md *xdp, bool tx, bool dut)
    139  	if (xdp_process_echo_packet(xdp, tx))
    156  int xdp_tester_check_tx(struct xdp_md *xdp)
    158  	xdp_update_stats(xdp, true, false);
    164  int xdp_tester_check_rx(struct xdp_md *xdp)
    166  	xdp_update_stats(xdp, false, false);
    174  int xdp_do_pass(struct xdp_md *xdp)
    [all …]
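The data/data_end casts in xdp_process_echo_packet() are the standard XDP bounds-check idiom. A minimal self-contained sketch of the same pattern applied to the Ethernet header; the program name is illustrative:

```c
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_parse_eth(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct ethhdr *eth = data;

	/* The verifier rejects any access not proven in-bounds. */
	if ((void *)(eth + 1) > data_end)
		return XDP_DROP;

	return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";
```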
xsk_xdp_progs.c
     19  SEC("xdp.frags") int xsk_def_prog(struct xdp_md *xdp)
     24  SEC("xdp.frags") int xsk_xdp_drop(struct xdp_md *xdp)
     33  SEC("xdp.frags") int xsk_xdp_populate_metadata(struct xdp_md *xdp)
     40  	err = bpf_xdp_adjust_meta(xdp, -(int)sizeof(struct xdp_info));
     44  	data = (void *)(long)xdp->data;
     45  	data_meta = (void *)(long)xdp->data_meta;
     56  SEC("xdp") int xsk_xdp_shared_umem(struct xdp_md *xdp)
     58  	void *data = (void *)(long)xdp->data;
     59  	void *data_end = (void *)(long)xdp->data_end;
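xsk_xdp_populate_metadata() above relies on bpf_xdp_adjust_meta() with a negative delta to open a metadata area in front of the packet. A sketch of that pattern with a placeholder struct my_meta:

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct my_meta {		/* placeholder layout */
	__u32 rx_mark;
};

SEC("xdp")
int xdp_put_meta(struct xdp_md *ctx)
{
	struct my_meta *meta;
	void *data;

	/* Negative delta grows the metadata area in front of data. */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_DROP;

	data = (void *)(long)ctx->data;
	meta = (void *)(long)ctx->data_meta;
	if ((void *)(meta + 1) > data)	/* re-validate after adjust */
		return XDP_DROP;

	meta->rx_mark = 0xcafe;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```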
test_xdp.c
     79  static __always_inline int handle_ipv4(struct xdp_md *xdp)
     81  	void *data_end = (void *)(long)xdp->data_end;
     82  	void *data = (void *)(long)xdp->data;
    112  	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
    115  	data = (void *)(long)xdp->data;
    116  	data_end = (void *)(long)xdp->data_end;
    152  static __always_inline int handle_ipv6(struct xdp_md *xdp)
    154  	void *data_end = (void *)(long)xdp->data_end;
    155  	void *data = (void *)(long)xdp->data;
    182  	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
    [all …]
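handle_ipv4() shows the two invariants of bpf_xdp_adjust_head(): a negative delta grows headroom for a new outer header, and every packet pointer must be re-derived and re-checked afterwards. A simplified sketch; real tunnel code also rebuilds the Ethernet header before the outer IP header:

```c
#include <linux/bpf.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>

static __always_inline int push_outer_iphdr(struct xdp_md *xdp)
{
	void *data, *data_end;
	struct iphdr *iph;

	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
		return XDP_DROP;

	/* All packet pointers were invalidated by the adjust. */
	data = (void *)(long)xdp->data;
	data_end = (void *)(long)xdp->data_end;
	iph = data;
	if ((void *)(iph + 1) > data_end)
		return XDP_DROP;

	iph->version = 4;	/* fill in the outer header... */
	iph->ihl = sizeof(*iph) >> 2;
	return XDP_TX;
}
```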
test_xdp_loop.c
     75  static __always_inline int handle_ipv4(struct xdp_md *xdp)
     77  	void *data_end = (void *)(long)xdp->data_end;
     78  	void *data = (void *)(long)xdp->data;
    108  	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
    111  	data = (void *)(long)xdp->data;
    112  	data_end = (void *)(long)xdp->data_end;
    148  static __always_inline int handle_ipv6(struct xdp_md *xdp)
    150  	void *data_end = (void *)(long)xdp->data_end;
    151  	void *data = (void *)(long)xdp->data;
    178  	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
    [all …]
test_xdp_do_redirect.c
     29  int xdp_redirect(struct xdp_md *xdp)
     31  	__u32 *metadata = (void *)(long)xdp->data_meta;
     32  	void *data_end = (void *)(long)xdp->data_end;
     33  	void *data = (void *)(long)xdp->data;
     41  	if (xdp->ingress_ifindex != (__u32)ifindex_in)
     55  	if (bpf_xdp_adjust_meta(xdp, sizeof(__u64)))
     86  int xdp_count_pkts(struct xdp_md *xdp)
     88  	void *data = (void *)(long)xdp->data;
     89  	void *data_end = (void *)(long)xdp->data_end;
test_xdp_bpf2bpf.c
     45  int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)
     49  	meta.ifindex = xdp->rxq->dev->ifindex;
     50  	meta.pkt_len = bpf_xdp_get_buff_len((struct xdp_md *)xdp);
     51  	bpf_xdp_output(xdp, &perf_buf_map,
     56  	test_result_fentry = xdp->rxq->dev->ifindex;
     62  int BPF_PROG(trace_on_exit, struct xdp_buff *xdp, int ret)
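trace_on_entry()/trace_on_exit() are fentry/fexit programs attached to another XDP program, so they receive the kernel-side struct xdp_buff rather than struct xdp_md. A sketch of the entry half; "xdp_prog_func" is a placeholder, and the loader must still point the program at the real target with bpf_program__set_attach_target(prog, target_prog_fd, "target_func") before loading:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/xdp_prog_func")
int BPF_PROG(trace_xdp_entry, struct xdp_buff *xdp)
{
	/* Tracing programs may walk kernel pointers directly via BTF. */
	bpf_printk("xdp pkt on ifindex %u", xdp->rxq->dev->ifindex);
	return 0;
}

char _license[] SEC("license") = "GPL";
```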
test_xdp_dynptr.c
     77  static __always_inline int handle_ipv4(struct xdp_md *xdp, struct bpf_dynptr *xdp_ptr)
     98  	if (ethhdr_sz + iphdr_sz + tcphdr_sz > xdp->data_end - xdp->data)
    121  	if (bpf_xdp_adjust_head(xdp, 0 - (int)iphdr_sz))
    124  	bpf_dynptr_from_xdp(xdp, 0, &new_xdp_ptr);
    159  static __always_inline int handle_ipv6(struct xdp_md *xdp, struct bpf_dynptr *xdp_ptr)
    177  	if (ethhdr_sz + iphdr_sz + tcphdr_sz > xdp->data_end - xdp->data)
    200  	if (bpf_xdp_adjust_head(xdp, 0 - (int)ipv6hdr_sz))
    203  	bpf_dynptr_from_xdp(xdp, 0, &new_xdp_ptr);
    231  int _xdp_tx_iptunnel(struct xdp_md *xdp)
    240  	bpf_dynptr_from_xdp(xdp, 0, &ptr);
    [all …]
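bpf_dynptr_from_xdp() is a kfunc, declared in the selftests' bpf_kfuncs.h; together with bpf_dynptr_slice() it reads packet bytes without the manual data/data_end arithmetic. A sketch assuming those declarations are available:

```c
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include "bpf_kfuncs.h"	/* kfunc declarations, as in the selftests */

SEC("xdp")
int xdp_dynptr_eth(struct xdp_md *xdp)
{
	struct ethhdr *eth, eth_buf;
	struct bpf_dynptr ptr;

	bpf_dynptr_from_xdp(xdp, 0, &ptr);
	/* Direct pointer when the range is linear, else a copy into
	 * eth_buf; NULL if the packet is too short. */
	eth = bpf_dynptr_slice(&ptr, 0, &eth_buf, sizeof(eth_buf));
	if (!eth)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```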
test_parse_tcp_hdr_opt.c
     36  static int parse_hdr_opt(const struct xdp_md *xdp, struct hdr_opt_state *state)
     38  	const void *data = (void *)(long)xdp->data;
     39  	const void *data_end = (void *)(long)xdp->data_end;
     82  int xdp_ingress_v6(struct xdp_md *xdp)
     84  	const void *data = (void *)(long)xdp->data;
     85  	const void *data_end = (void *)(long)xdp->data_end;
    106  	err = parse_hdr_opt(xdp, &opt_state);
test_xdp_adjust_tail_shrink.c
     13  int _xdp_adjust_tail_shrink(struct xdp_md *xdp)
     15  	__u8 *data_end = (void *)(long)xdp->data_end;
     16  	__u8 *data = (void *)(long)xdp->data;
     19  	switch (bpf_xdp_get_buff_len(xdp)) {
     47  	if (bpf_xdp_adjust_tail(xdp, 0 - offset))
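A compact sketch of the shrink pattern above: query the full (possibly multi-buffer) frame length with bpf_xdp_get_buff_len(), then trim with a negative bpf_xdp_adjust_tail() delta. The trim amount is a placeholder:

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int xdp_trim_tail(struct xdp_md *xdp)
{
	int offset = 128;	/* placeholder trim amount */

	/* Full frame length, including any fragments. */
	if (bpf_xdp_get_buff_len(xdp) < 2 * offset)
		return XDP_PASS;
	if (bpf_xdp_adjust_tail(xdp, 0 - offset))
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```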
/linux/drivers/net/ethernet/broadcom/bnxt/

bnxt_xdp.c
     28  			   struct xdp_buff *xdp)
     38  	if (xdp && xdp_buff_has_frags(xdp)) {
     39  		sinfo = xdp_get_shared_info_from_buff(xdp);
     47  	if (xdp)
     48  		tx_buf->page = virt_to_head_page(xdp->data);
     97  		    struct xdp_buff *xdp)
    101  	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
    184  			struct xdp_buff *xdp)
    199  	xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
    200  	xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
    [all …]
bnxt_xdp.h
     18  		   struct xdp_buff *xdp);
     21  		 struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
     23  int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
     31  			struct xdp_buff *xdp);
     33  		      struct xdp_buff *xdp);
     36  			      struct xdp_buff *xdp,
/linux/samples/bpf/

xdp_adjust_tail_kern.c
     70  static __always_inline int send_icmp4_too_big(struct xdp_md *xdp)
     74  	if (bpf_xdp_adjust_head(xdp, 0 - headroom))
     76  	void *data = (void *)(long)xdp->data;
     77  	void *data_end = (void *)(long)xdp->data_end;
    120  static __always_inline int handle_ipv4(struct xdp_md *xdp)
    122  	void *data_end = (void *)(long)xdp->data_end;
    123  	void *data = (void *)(long)xdp->data;
    129  	if (bpf_xdp_adjust_tail(xdp, 0 - offset))
    131  		return send_icmp4_too_big(xdp);
    137  int _xdp_icmp(struct xdp_md *xdp)
    [all …]
xdp_tx_iptunnel_kern.c
     77  static __always_inline int handle_ipv4(struct xdp_md *xdp)
     79  	void *data_end = (void *)(long)xdp->data_end;
     80  	void *data = (void *)(long)xdp->data;
    112  	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
    115  	data = (void *)(long)xdp->data;
    116  	data_end = (void *)(long)xdp->data_end;
    152  static __always_inline int handle_ipv6(struct xdp_md *xdp)
    154  	void *data_end = (void *)(long)xdp->data_end;
    155  	void *data = (void *)(long)xdp->data;
    184  	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
    [all …]
/linux/drivers/net/ethernet/microchip/lan966x/

lan966x_xdp.c
      9  static int lan966x_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
     18  		NL_SET_ERR_MSG_MOD(xdp->extack,
     24  	old_prog = xchg(&port->xdp_prog, xdp->prog);
     43  int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp)
     45  	switch (xdp->command) {
     47  		return lan966x_xdp_setup(dev, xdp);
     79  	struct xdp_buff xdp;
     82  	xdp_init_buff(&xdp, PAGE_SIZE << lan966x->rx.page_order,
     84  	xdp_prepare_buff(&xdp, page_address(page),
     87  	act = bpf_prog_run_xdp(xdp_prog, &xdp);
    [all …]
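lan966x_xdp_run() above is the canonical driver RX hook shape: initialize and prepare the xdp_buff, run the program, then dispatch on the verdict. A condensed sketch with hypothetical my_* types; the helper signatures assume a recent kernel:

```c
#include <linux/filter.h>
#include <net/xdp.h>
#include <trace/events/xdp.h>

static u32 my_dev_run_xdp(struct my_rx *rx, struct bpf_prog *prog,
			  struct page *page, u32 len)
{
	struct xdp_buff xdp;
	u32 act;

	xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
	xdp_prepare_buff(&xdp, page_address(page), XDP_PACKET_HEADROOM,
			 len, false);

	act = bpf_prog_run_xdp(prog, &xdp);
	switch (act) {
	case XDP_PASS:
	case XDP_TX:
		return act;
	case XDP_REDIRECT:
		if (xdp_do_redirect(rx->netdev, &xdp, prog))
			return XDP_DROP;	/* caller recycles the page */
		return XDP_REDIRECT;
	default:
		bpf_warn_invalid_xdp_action(rx->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		return XDP_DROP;
	}
}
```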
/linux/drivers/net/ethernet/intel/ice/

ice_xsk.c
    437  static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
    444  	buffs = xsk_buff_alloc_batch(pool, xdp, count);
    446  		dma = xsk_buff_xdp_get_dma(*xdp);
    453  		ice_xdp_meta_set_desc(*xdp, rx_desc);
    456  		xdp++;
    481  	struct xdp_buff **xdp;
    484  	xdp = ice_xdp_buf(rx_ring, ntu);
    487  	nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc,
    494  		xdp = ice_xdp_buf(rx_ring, 0);
    500  	nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
    [all …]
ice_txrx.c
    385  	struct xdp_buff *xdp = &rx_ring->xdp;
    399  	if (xdp->data) {
    400  		xdp_return_buff(xdp);
    401  		xdp->data = NULL;
    536  ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
    546  	ice_xdp_meta_set_desc(xdp, eop_desc);
    548  	act = bpf_prog_run_xdp(xdp_prog, xdp);
    555  		ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
    562  		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
    577  	ice_set_rx_bufs_act(xdp, rx_ring, ret);
    [all …]
/linux/drivers/net/vmxnet3/

vmxnet3_xdp.c
    251  vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,
    260  	act = bpf_prog_run_xdp(prog, xdp);
    261  	page = virt_to_page(xdp->data_hard_start);
    267  		err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);
    276  		xdpf = xdp_convert_buff_to_frame(xdp);
    304  		   const struct xdp_buff *xdp)
    316  	skb_reserve(skb, xdp->data - xdp->data_hard_start);
    317  	skb_put(skb, xdp->data_end - xdp->data);
    331  	struct xdp_buff xdp;
    341  	xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
    [all …]
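When vmxnet3_run_xdp() sees XDP_TX it converts the buffer with xdp_convert_buff_to_frame() so the data can live on a TX ring independent of the RX descriptor. A short sketch; my_dev_xmit_xdp_frame() and struct my_dev are hypothetical:

```c
#include <net/xdp.h>

static int my_dev_xdp_tx(struct my_dev *dev, struct xdp_buff *xdp)
{
	/* Detach the data from the xdp_buff; fails if there is not
	 * enough head/tailroom for the xdp_frame metadata. */
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return -ENOSPC;
	return my_dev_xmit_xdp_frame(dev, xdpf);
}
```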
/linux/drivers/net/ethernet/netronome/nfp/

nfp_net_xsk.c
     18  			    struct xdp_buff *xdp)
     27  	rx_ring->xsk_rxbufs[idx].xdp = xdp;
     29  		xsk_buff_xdp_get_frame_dma(xdp) + headroom;
     35  	rxbuf->xdp = NULL;
     40  	if (rxbuf->xdp)
     41  		xsk_buff_free(rxbuf->xdp);
     62  	struct xdp_buff *xdp;
     67  		xdp = xsk_buff_alloc(pool);
     68  		if (!xdp)
     71  		nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp);
/linux/include/trace/events/

xdp.h
      3  #define TRACE_SYSTEM xdp
     12  #include <net/xdp.h>
     32  		 const struct bpf_prog *xdp, u32 act),
     34  	TP_ARGS(dev, xdp, act),
     43  		__entry->prog_id = xdp->aux->id;
     93  		 const struct bpf_prog *xdp,
     98  	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
    124  		__entry->prog_id = xdp->aux->id;
    143  		 const struct bpf_prog *xdp,
    147  	TP_ARGS(dev, xdp, tg…
    159  _trace_xdp_redirect(dev, xdp, to)
    162  _trace_xdp_redirect_err(dev, xdp, to, err)
    165  _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index)
    168  _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err)
    [all …]
/linux/drivers/net/hyperv/

netvsc_bpf.c
     25  		   struct xdp_buff *xdp)
     35  	xdp->data_hard_start = NULL;
     56  	xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
     57  	xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);
     59  	memcpy(xdp->data, data, len);
     61  	act = bpf_prog_run_xdp(prog, xdp);
     73  	if (!xdp_do_redirect(ndev, xdp, prog)) {
    107  	xdp->data_hard_start = NULL;
    167  	struct netdev_bpf xdp;
    178  	memset(&xdp, 0, sizeof(xdp));
    [all …]
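netvsc_vf_setxdp() builds the kernel-side struct netdev_bpf request for program installation. From user space the same path is normally reached through libbpf; a sketch of attaching a loaded program in native mode (ifname handling is illustrative):

```c
#include <errno.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int attach_xdp(struct bpf_program *prog, const char *ifname)
{
	int ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -errno;
	/* Lands in the driver's ndo_bpf handler as XDP_SETUP_PROG. */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}
```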
/linux/net/bpf/

test_run.c
    136  	struct xdp_test_data *xdp = arg;
    141  	orig_ctx = xdp->orig_ctx;
    151  	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
    161  static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
    168  		.pool_size = xdp->batch_size,
    171  		.init_arg = xdp,
    174  	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
    175  	if (!xdp->frames)
    178  	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
    179  	if (!xdp->skbs)
    [all …]
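The xdp_test_run_* infrastructure above backs BPF_PROG_TEST_RUN for XDP programs. A user-space sketch that injects one packet through it with libbpf and returns the program's verdict; prog_fd is assumed to be a loaded XDP program:

```c
#include <bpf/bpf.h>

static int run_xdp_once(int prog_fd, void *pkt, __u32 pkt_len)
{
	char out[1500];
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.data_out = out,
		.data_size_out = sizeof(out),
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &opts);

	return err ?: (int)opts.retval;	/* retval is the XDP verdict */
}
```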
/linux/tools/testing/selftests/bpf/

test_tcp_check_syncookie_user.c
     21  static int get_map_fd_by_prog_id(int prog_id, bool *xdp)
     48  	*xdp = info.type == BPF_PROG_TYPE_XDP;
     59  static int run_test(int server_fd, int results_fd, bool xdp)
    110  	if (xdp && value_gen == 0) {
    164  	bool xdp;
    174  	results = get_map_fd_by_prog_id(atoi(argv[1]), &xdp);
    194  	if (run_test(server, results, xdp))
    197  	if (run_test(server_v6, results, xdp))
    200  	if (run_test(server_dual, results, xdp))
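get_map_fd_by_prog_id() resolves a program id to the map it uses via bpf_obj_get_info_by_fd(). A condensed sketch of that lookup, with error handling trimmed to the essentials:

```c
#include <unistd.h>
#include <bpf/bpf.h>

static int first_map_fd_of_prog(__u32 prog_id)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 map_ids[1];
	int prog_fd, err;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0)
		return prog_fd;

	info.nr_map_ids = 1;	/* ask for the first map id only */
	info.map_ids = (__u64)(unsigned long)map_ids;
	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	close(prog_fd);
	if (err || !info.nr_map_ids)
		return -1;

	return bpf_map_get_fd_by_id(map_ids[0]);
}
```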