/* SPDX-License-Identifier: GPL-2.0-only */
/* include/net/xdp.h
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__

#include <linux/skbuff.h> /* skb_shared_info */

/**
 * DOC: XDP RX-queue information
 *
 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 * level RX-ring queues. It is information that is specific to how
 * the driver has configured a given RX-ring queue.
 *
 * Each xdp_buff frame received in the driver carries a (pointer)
 * reference to this xdp_rxq_info structure. This provides the XDP
 * data-path read-access to RX-info for both kernel and bpf-side
 * (limited subset).
 *
 * For now, direct access is only safe while running in NAPI/softirq
 * context. Contents are read-mostly and must not be updated during
 * driver NAPI/softirq poll.
 *
 * The driver usage API is a register and unregister API.
 *
 * The struct is not directly tied to the XDP prog. A new XDP prog
 * can be attached as long as it doesn't change the underlying
 * RX-ring. If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
 * memory. In that process the driver MUST call unregister (which
 * also applies to driver shutdown and unload). The register API is
 * also mandatory during RX-ring setup.
 */

enum xdp_mem_type {
	MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
	MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
	MEM_TYPE_PAGE_POOL,
	MEM_TYPE_XSK_BUFF_POOL,
	MEM_TYPE_MAX,
};

/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH		(1U << 0)	/* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK	XDP_XMIT_FLUSH

struct xdp_mem_info {
	u32 type; /* enum xdp_mem_type, but known size type */
	u32 id;
};

struct page_pool;

struct xdp_rxq_info {
	struct net_device *dev;
	u32 queue_index;
	u32 reg_state;
	struct xdp_mem_info mem;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom */
};

/* Reserve memory area at the end of the data area.
 *
 * This macro reserves tailroom in the XDP buffer by limiting the
 * XDP/BPF data access to data_hard_end. Notice the same area (and size)
 * is used for XDP_PASS, when constructing the SKB via build_skb().
 */
#define xdp_data_hard_end(xdp)				\
	((xdp)->data_hard_start + (xdp)->frame_sz -	\
	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

struct xdp_frame {
	void *data;
	u16 len;
	u16 headroom;
	u32 metasize:8;
	u32 frame_sz:24;
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem info is valid on remote CPU.
	 */
	struct xdp_mem_info mem;
	struct net_device *dev_rx; /* used by cpumap */
};
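/* Hypothetical RX-path sketch, not part of this header: how a driver that
 * hands one full page to each frame might fill in an xdp_buff before running
 * its XDP program. The "rxq", "page" and "pkt_len" identifiers are assumed
 * driver-local state; only the xdp_buff fields and xdp_data_hard_end() come
 * from this file.
 *
 *	struct xdp_buff xdp;
 *
 *	xdp.rxq = &rxq->xdp_rxq;		// registered xdp_rxq_info
 *	xdp.frame_sz = PAGE_SIZE;		// one page per frame in this sketch
 *	xdp.data_hard_start = page_address(page);
 *	xdp.data = xdp.data_hard_start + XDP_PACKET_HEADROOM;
 *	xdp.data_end = xdp.data + pkt_len;
 *	xdp.data_meta = xdp.data;		// no metadata pushed yet
 *
 * With this layout xdp_data_hard_end(&xdp) leaves
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) bytes of tailroom, which
 * build_skb() reuses on XDP_PASS.
 */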
/* Clear kernel pointers in xdp_frame */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
	frame->data = NULL;
	frame->dev_rx = NULL;
}

/* Avoids inlining WARN macro in fast-path */
void xdp_warn(const char *msg, const char *func, const int line);
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);

/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdp_frame;
	int metasize;
	int headroom;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
		return xdp_convert_zc_to_xdp_frame(xdp);

	/* Ensure headroom is available for storing info */
	headroom = xdp->data - xdp->data_hard_start;
	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
		return NULL;

	/* Catch if driver didn't reserve tailroom for skb_shared_info */
	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		XDP_WARN("Driver BUG: missing reserved tailroom");
		return NULL;
	}

	/* Store info in top of packet */
	xdp_frame = xdp->data_hard_start;

	xdp_frame->data = xdp->data;
	xdp_frame->len = xdp->data_end - xdp->data;
	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
	xdp_frame->metasize = metasize;
	xdp_frame->frame_sz = xdp->frame_sz;

	/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
	xdp_frame->mem = xdp->rxq->mem;

	return xdp_frame;
}

void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);

/* When sending an xdp_frame into the network stack, there is no return
 * point callback, which is needed to release e.g. DMA-mapping resources
 * with page_pool. Thus, have an explicit function to release frame
 * resources.
 */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
static inline void xdp_release_frame(struct xdp_frame *xdpf)
{
	struct xdp_mem_info *mem = &xdpf->mem;

	/* Currently only page_pool needs this */
	if (mem->type == MEM_TYPE_PAGE_POOL)
		__xdp_release_frame(xdpf->data, mem);
}

int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
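/* Hypothetical RX-ring setup sketch, not part of this header, showing the
 * registration order the DOC section above requires. The "ring" and "netdev"
 * variables, the page_pool member and the "err_out" label are assumed driver
 * state; only the xdp_rxq_info_* calls and MEM_TYPE_PAGE_POOL come from this
 * file.
 *
 *	err = xdp_rxq_info_reg(&ring->xdp_rxq, netdev, ring->queue_index);
 *	if (err)
 *		goto err_out;
 *
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
 *					 ring->page_pool);
 *	if (err) {
 *		xdp_rxq_info_unreg(&ring->xdp_rxq);
 *		goto err_out;
 *	}
 *
 * On RX-ring teardown (and on driver shutdown/unload) the driver must undo
 * this with xdp_rxq_info_unreg(&ring->xdp_rxq), which also drops the
 * registered memory model.
 */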
/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

struct xdp_attachment_info {
	struct bpf_prog *prog;
	u32 flags;
};

struct netdev_bpf;
int xdp_attachment_query(struct xdp_attachment_info *info,
			 struct netdev_bpf *bpf);
bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
			     struct netdev_bpf *bpf);
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf);

#define DEV_MAP_BULK_SIZE 16

#endif /* __LINUX_NET_XDP_H__ */
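/* Hypothetical usage sketch, not part of this header, for the metadata
 * helpers xdp_set_data_meta_invalid()/xdp_data_meta_unsupported() above.
 * A driver without metadata support marks the buffer it builds, and the
 * core side can detect this when a program asks for metadata room, e.g.
 * bpf_xdp_adjust_meta() returns -ENOTSUPP in that case. "xdp" is an
 * assumed, already-initialised xdp_buff.
 *
 *	// driver RX path, metadata not supported:
 *	xdp_set_data_meta_invalid(&xdp);
 *
 *	// core-side check before growing the metadata area:
 *	if (xdp_data_meta_unsupported(&xdp))
 *		return -ENOTSUPP;
 */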