// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP"

#include <linux/export.h>

#include <net/libeth/xdp.h>

#include "priv.h"

/* XDPSQ sharing */

DEFINE_STATIC_KEY_FALSE(libeth_xdpsq_share);
EXPORT_SYMBOL_GPL(libeth_xdpsq_share);

void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
			const struct net_device *dev)
{
	bool warn;

	spin_lock_init(&lock->lock);
	lock->share = true;

	warn = !static_key_enabled(&libeth_xdpsq_share);
	static_branch_inc(&libeth_xdpsq_share);

	if (warn && net_ratelimit())
		netdev_warn(dev, "XDPSQ sharing enabled, possible XDP Tx slowdown\n");
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_get);

void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
			const struct net_device *dev)
{
	static_branch_dec(&libeth_xdpsq_share);

	if (!static_key_enabled(&libeth_xdpsq_share) && net_ratelimit())
		netdev_notice(dev, "XDPSQ sharing disabled\n");

	lock->share = false;
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_put);

void __acquires(&lock->lock)
__libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
{
	spin_lock(&lock->lock);
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_lock);

void __releases(&lock->lock)
__libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
{
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_unlock);

/* XDPSQ clean-up timers */

/**
 * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer
 * @timer: timer to initialize
 * @xdpsq: queue this timer belongs to
 * @lock: corresponding XDPSQ lock
 * @poll: queue polling/completion function
 *
 * XDPSQ clean-up timers must be set up at queue configuration time, before
 * the queue is used. Set the required pointers and the cleaning callback.
 */
void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
			     struct libeth_xdpsq_lock *lock,
			     void (*poll)(struct work_struct *work))
{
	timer->xdpsq = xdpsq;
	timer->lock = lock;

	INIT_DELAYED_WORK(&timer->dwork, poll);
}
EXPORT_SYMBOL_GPL(libeth_xdpsq_init_timer);
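
/*
 * Illustrative sketch, not part of the library: a driver that lets several
 * queue pairs transmit on one XDPSQ would pair the helpers above roughly as
 * follows (``sq``, ``netdev`` and my_xdpsq_clean_work() are hypothetical):
 *
 *	// queue configuration
 *	__libeth_xdpsq_get(&sq->xdpsq_lock, netdev);
 *	libeth_xdpsq_init_timer(&sq->timer, sq, &sq->xdpsq_lock,
 *				my_xdpsq_clean_work);
 *
 *	// Tx hot path
 *	__libeth_xdpsq_lock(&sq->xdpsq_lock);
 *	// ...fill Tx descriptors...
 *	__libeth_xdpsq_unlock(&sq->xdpsq_lock);
 *
 *	// queue teardown
 *	__libeth_xdpsq_put(&sq->xdpsq_lock, netdev);
 *
 * Drivers are expected to go through the inline wrappers from
 * <net/libeth/xdp.h>, which take these slow paths only when sharing is
 * actually enabled.
 */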

/* ``XDP_TX`` bulking */

static void __cold
libeth_xdp_tx_return_one(const struct libeth_xdp_tx_frame *frm)
{
	if (frm->len_fl & LIBETH_XDP_TX_MULTI)
		libeth_xdp_return_frags(frm->data + frm->soff, true);

	libeth_xdp_return_va(frm->data, true);
}

static void __cold
libeth_xdp_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, u32 count)
{
	for (u32 i = 0; i < count; i++) {
		const struct libeth_xdp_tx_frame *frm = &bq[i];

		if (!(frm->len_fl & LIBETH_XDP_TX_FIRST))
			continue;

		libeth_xdp_tx_return_one(frm);
	}
}

static void __cold libeth_trace_xdp_exception(const struct net_device *dev,
					      const struct bpf_prog *prog,
					      u32 act)
{
	trace_xdp_exception(dev, prog, act);
}

/**
 * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames
 * @bq: XDP Tx frame bulk
 * @sent: number of frames sent successfully (from this bulk)
 * @flags: internal libeth_xdp flags (.ndo_xdp_xmit etc.)
 *
 * Cold helper used by __libeth_xdp_tx_flush_bulk(), do not call directly.
 * Reports XDP Tx exceptions, frees the frames that won't be sent, or adjusts
 * the Tx bulk to try again later.
 */
void __cold libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
				    u32 flags)
{
	const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent];
	u32 left = bq->count - sent;

	if (!(flags & LIBETH_XDP_TX_NDO))
		libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX);

	if (!(flags & LIBETH_XDP_TX_DROP)) {
		memmove(bq->bulk, pos, left * sizeof(*bq->bulk));
		bq->count = left;

		return;
	}

	if (!(flags & LIBETH_XDP_TX_NDO))
		libeth_xdp_tx_return_bulk(pos, left);
	else
		libeth_xdp_xmit_return_bulk(pos, left, bq->dev);

	bq->count = 0;
}
EXPORT_SYMBOL_GPL(libeth_xdp_tx_exception);

/* .ndo_xdp_xmit() implementation */

u32 __cold libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
				       u32 count, const struct net_device *dev)
{
	u32 n = 0;

	for (u32 i = 0; i < count; i++) {
		const struct libeth_xdp_tx_frame *frm = &bq[i];
		dma_addr_t dma;

		if (frm->flags & LIBETH_XDP_TX_FIRST)
			dma = *libeth_xdp_xmit_frame_dma(frm->xdpf);
		else
			dma = dma_unmap_addr(frm, dma);

		dma_unmap_page(dev->dev.parent, dma, dma_unmap_len(frm, len),
			       DMA_TO_DEVICE);

		/* Actual xdp_frames are freed by the core */
		n += !!(frm->flags & LIBETH_XDP_TX_FIRST);
	}

	return n;
}
EXPORT_SYMBOL_GPL(libeth_xdp_xmit_return_bulk);
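
/*
 * Conceptual sketch of how the exception helper above is used; the real
 * caller is __libeth_xdp_tx_flush_bulk(), and drv_xmit_bulk() below is a
 * hypothetical driver callback:
 *
 *	sent = drv_xmit_bulk(bq->bulk, bq->count, ...);
 *	if (unlikely(sent != bq->count))
 *		libeth_xdp_tx_exception(bq, sent, flags);
 *
 * Without %LIBETH_XDP_TX_DROP, the unsent tail is moved to the head of the
 * bulk to be retried on the next flush. With it, the tail is freed right
 * away: via libeth_xdp_xmit_return_bulk() for %LIBETH_XDP_TX_NDO (DMA-mapped
 * &xdp_frame buffers) or via the ``XDP_TX`` page return path otherwise.
 */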

/* Rx polling path */

/**
 * libeth_xdp_load_stash - recreate an &xdp_buff from libeth_xdp buffer stash
 * @dst: target &libeth_xdp_buff to initialize
 * @src: source stash
 *
 * External helper used by libeth_xdp_init_buff(), do not call directly.
 * Recreate an onstack &libeth_xdp_buff using the stash saved earlier.
 * The only field untouched (rxq) is initialized later in the
 * abovementioned function.
 */
void libeth_xdp_load_stash(struct libeth_xdp_buff *dst,
			   const struct libeth_xdp_buff_stash *src)
{
	dst->data = src->data;
	dst->base.data_end = src->data + src->len;
	dst->base.data_meta = src->data;
	dst->base.data_hard_start = src->data - src->headroom;

	dst->base.frame_sz = src->frame_sz;
	dst->base.flags = src->flags;
}
EXPORT_SYMBOL_GPL(libeth_xdp_load_stash);

/**
 * libeth_xdp_save_stash - convert &xdp_buff to a libeth_xdp buffer stash
 * @dst: target &libeth_xdp_buff_stash to initialize
 * @src: source XDP buffer
 *
 * External helper used by libeth_xdp_save_buff(), do not call directly.
 * Use the fields from the passed XDP buffer to initialize the stash on the
 * queue, so that a partially received frame can be finished later during
 * the next NAPI poll.
 */
void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst,
			   const struct libeth_xdp_buff *src)
{
	dst->data = src->data;
	dst->headroom = src->data - src->base.data_hard_start;
	dst->len = src->base.data_end - src->data;

	dst->frame_sz = src->base.frame_sz;
	dst->flags = src->base.flags;

	WARN_ON_ONCE(dst->flags != src->base.flags);
}
EXPORT_SYMBOL_GPL(libeth_xdp_save_stash);

void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash)
{
	LIBETH_XDP_ONSTACK_BUFF(xdp);

	libeth_xdp_load_stash(xdp, stash);
	libeth_xdp_return_buff_slow(xdp);

	stash->data = NULL;
}
EXPORT_SYMBOL_GPL(__libeth_xdp_return_stash);
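
/*
 * Illustrative flow, hypothetical driver pseudo-code: the stash helpers
 * above let a partially built multi-buffer frame survive a NAPI poll
 * boundary. Per poll, an Rx routine conceptually does:
 *
 *	libeth_xdp_init_buff(...);	// reload &xdp_buff from the queue stash
 *	while (descriptors are ready)
 *		...build the buffer, run XDP, flush the bulks...
 *	libeth_xdp_save_buff(...);	// stash an unfinished frame, if any
 *
 * Only the load/save helpers defined in this file are shown above; the
 * exact signatures of the wrappers live in <net/libeth/xdp.h>.
 */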

/**
 * libeth_xdp_return_buff_slow - free &libeth_xdp_buff
 * @xdp: buffer to free/return
 *
 * Slowpath version of libeth_xdp_return_buff() to be called on exceptions,
 * queue clean-ups etc., without unwanted inlining.
 */
void __cold libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp)
{
	__libeth_xdp_return_buff(xdp, false);
}
EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_slow);

/**
 * libeth_xdp_buff_add_frag - add frag to XDP buffer
 * @xdp: head XDP buffer
 * @fqe: Rx buffer containing the frag
 * @len: frag length reported by HW
 *
 * External helper used by libeth_xdp_process_buff(), do not call directly.
 * Frees both head and frag buffers on error.
 *
 * Return: true on success, false on error (no space for a new frag).
 */
bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
			      const struct libeth_fqe *fqe,
			      u32 len)
{
	netmem_ref netmem = fqe->netmem;

	if (!xdp_buff_add_frag(&xdp->base, netmem,
			       fqe->offset + netmem_get_pp(netmem)->p.offset,
			       len, fqe->truesize))
		goto recycle;

	return true;

recycle:
	libeth_rx_recycle_slow(netmem);
	libeth_xdp_return_buff_slow(xdp);

	return false;
}
EXPORT_SYMBOL_GPL(libeth_xdp_buff_add_frag);

/* Tx buffer completion */

static void libeth_xdp_put_netmem_bulk(netmem_ref netmem,
				       struct xdp_frame_bulk *bq)
{
	if (unlikely(bq->count == XDP_BULK_QUEUE_SIZE))
		xdp_flush_frame_bulk(bq);

	bq->q[bq->count++] = netmem;
}

/**
 * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
 * @sinfo: shared info corresponding to the buffer
 * @bq: XDP frame bulk to store the buffer
 * @frags: whether the buffer has frags
 *
 * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff, speeds up Tx
 * completion of ``XDP_TX`` buffers and allows freeing them in the same bulks
 * as &xdp_frame buffers.
 */
void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
				 struct xdp_frame_bulk *bq, bool frags)
{
	if (!frags)
		goto head;

	for (u32 i = 0; i < sinfo->nr_frags; i++)
		libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]),
					   bq);

head:
	libeth_xdp_put_netmem_bulk(virt_to_netmem(sinfo), bq);
}
EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_bulk);

/* Module */

static const struct libeth_xdp_ops xdp_ops __initconst = {
	.bulk = libeth_xdp_return_buff_bulk,
};

static int __init libeth_xdp_module_init(void)
{
	libeth_attach_xdp(&xdp_ops);

	return 0;
}
module_init(libeth_xdp_module_init);

static void __exit libeth_xdp_module_exit(void)
{
	libeth_detach_xdp();
}
module_exit(libeth_xdp_module_exit);

MODULE_DESCRIPTION("Common Ethernet library - XDP infra");
MODULE_IMPORT_NS("LIBETH");
MODULE_LICENSE("GPL");