// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP"

#include <linux/export.h>

#include <net/libeth/xdp.h>

#include "priv.h"

/* XDPSQ sharing */

DEFINE_STATIC_KEY_FALSE(libeth_xdpsq_share);
EXPORT_SYMBOL_GPL(libeth_xdpsq_share);

void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
			const struct net_device *dev)
{
	bool warn;

	spin_lock_init(&lock->lock);
	lock->share = true;

	warn = !static_key_enabled(&libeth_xdpsq_share);
	static_branch_inc(&libeth_xdpsq_share);

	if (warn && net_ratelimit())
		netdev_warn(dev, "XDPSQ sharing enabled, possible XDP Tx slowdown\n");
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_get);

void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
			const struct net_device *dev)
{
	static_branch_dec(&libeth_xdpsq_share);

	if (!static_key_enabled(&libeth_xdpsq_share) && net_ratelimit())
		netdev_notice(dev, "XDPSQ sharing disabled\n");

	lock->share = false;
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_put);

void __acquires(&lock->lock)
__libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
{
	spin_lock(&lock->lock);
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_lock);

void __releases(&lock->lock)
__libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
{
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_unlock);

/* XDPSQ clean-up timers */

/**
 * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer
 * @timer: timer to initialize
 * @xdpsq: queue this timer belongs to
 * @lock: corresponding XDPSQ lock
 * @poll: queue polling/completion function
 *
 * XDPSQ clean-up timers must be set up at queue configuration time, before
 * the queue is used. Set the required pointers and the cleaning callback.
 */
void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
			     struct libeth_xdpsq_lock *lock,
			     void (*poll)(struct work_struct *work))
{
	timer->xdpsq = xdpsq;
	timer->lock = lock;

	INIT_DELAYED_WORK(&timer->dwork, poll);
}
EXPORT_SYMBOL_GPL(libeth_xdpsq_init_timer);
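
/*
 * A minimal usage sketch, not part of the library: a driver embeds a
 * &libeth_xdpsq_timer in its queue structure and initializes it once at
 * queue configuration time. my_xdpsq and my_xdpsq_clean_work() below are
 * hypothetical driver symbols; the callback typically resolves the queue
 * from the timer and completes descriptors while holding the XDPSQ lock.
 *
 *	static void my_xdpsq_clean_work(struct work_struct *work)
 *	{
 *		struct libeth_xdpsq_timer *timer;
 *
 *		timer = container_of(work, typeof(*timer), dwork.work);
 *		... clean timer->xdpsq under timer->lock ...
 *	}
 *
 *	libeth_xdpsq_init_timer(&my_xdpsq->timer, my_xdpsq,
 *				&my_xdpsq->lock, my_xdpsq_clean_work);
 */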

/* ``XDP_TX`` bulking */

static void __cold
libeth_xdp_tx_return_one(const struct libeth_xdp_tx_frame *frm)
{
	if (frm->len_fl & LIBETH_XDP_TX_MULTI)
		libeth_xdp_return_frags(frm->data + frm->soff, true);

	libeth_xdp_return_va(frm->data, true);
}

static void __cold
libeth_xdp_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, u32 count)
{
	for (u32 i = 0; i < count; i++) {
		const struct libeth_xdp_tx_frame *frm = &bq[i];

		if (!(frm->len_fl & LIBETH_XDP_TX_FIRST))
			continue;

		libeth_xdp_tx_return_one(frm);
	}
}

static void __cold libeth_trace_xdp_exception(const struct net_device *dev,
					      const struct bpf_prog *prog,
					      u32 act)
{
	trace_xdp_exception(dev, prog, act);
}

/**
 * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames
 * @bq: XDP Tx frame bulk
 * @sent: number of frames sent successfully (from this bulk)
 * @flags: internal libeth_xdp flags (XSk, .ndo_xdp_xmit etc.)
 *
 * Cold helper used by __libeth_xdp_tx_flush_bulk(), do not call directly.
 * Reports an XDP Tx exception, frees the frames that won't be sent, or
 * adjusts the Tx bulk to try again later.
 */
void __cold libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
				    u32 flags)
{
	const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent];
	u32 left = bq->count - sent;

	if (!(flags & LIBETH_XDP_TX_NDO))
		libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX);

	if (!(flags & LIBETH_XDP_TX_DROP)) {
		memmove(bq->bulk, pos, left * sizeof(*bq->bulk));
		bq->count = left;

		return;
	}

	if (flags & LIBETH_XDP_TX_XSK)
		libeth_xsk_tx_return_bulk(pos, left);
	else if (!(flags & LIBETH_XDP_TX_NDO))
		libeth_xdp_tx_return_bulk(pos, left);
	else
		libeth_xdp_xmit_return_bulk(pos, left, bq->dev);

	bq->count = 0;
}
EXPORT_SYMBOL_GPL(libeth_xdp_tx_exception);

/* .ndo_xdp_xmit() implementation */

u32 __cold libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
				       u32 count, const struct net_device *dev)
{
	u32 n = 0;

	for (u32 i = 0; i < count; i++) {
		const struct libeth_xdp_tx_frame *frm = &bq[i];
		dma_addr_t dma;

		if (frm->flags & LIBETH_XDP_TX_FIRST)
			dma = *libeth_xdp_xmit_frame_dma(frm->xdpf);
		else
			dma = dma_unmap_addr(frm, dma);

		dma_unmap_page(dev->dev.parent, dma, dma_unmap_len(frm, len),
			       DMA_TO_DEVICE);

		/* Actual xdp_frames are freed by the core */
		n += !!(frm->flags & LIBETH_XDP_TX_FIRST);
	}

	return n;
}
EXPORT_SYMBOL_GPL(libeth_xdp_xmit_return_bulk);

/* Rx polling path */

/**
 * libeth_xdp_load_stash - recreate an &xdp_buff from libeth_xdp buffer stash
 * @dst: target &libeth_xdp_buff to initialize
 * @src: source stash
 *
 * External helper used by libeth_xdp_init_buff(), do not call directly.
 * Recreate an onstack &libeth_xdp_buff using the stash saved earlier.
 * The only field untouched (rxq) is initialized later in the
 * abovementioned function.
 */
void libeth_xdp_load_stash(struct libeth_xdp_buff *dst,
			   const struct libeth_xdp_buff_stash *src)
{
	dst->data = src->data;
	dst->base.data_end = src->data + src->len;
	dst->base.data_meta = src->data;
	dst->base.data_hard_start = src->data - src->headroom;

	dst->base.frame_sz = src->frame_sz;
	dst->base.flags = src->flags;
}
EXPORT_SYMBOL_GPL(libeth_xdp_load_stash);

/**
 * libeth_xdp_save_stash - convert &xdp_buff to a libeth_xdp buffer stash
 * @dst: target &libeth_xdp_buff_stash to initialize
 * @src: source XDP buffer
 *
 * External helper used by libeth_xdp_save_buff(), do not call directly.
 * Use the fields from the passed XDP buffer to initialize the stash on the
 * queue, so that a partially received frame can be finished later during
 * the next NAPI poll.
 */
void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst,
			   const struct libeth_xdp_buff *src)
{
	dst->data = src->data;
	dst->headroom = src->data - src->base.data_hard_start;
	dst->len = src->base.data_end - src->data;

	dst->frame_sz = src->base.frame_sz;
	dst->flags = src->base.flags;

	WARN_ON_ONCE(dst->flags != src->base.flags);
}
EXPORT_SYMBOL_GPL(libeth_xdp_save_stash);

void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash)
{
	LIBETH_XDP_ONSTACK_BUFF(xdp);

	libeth_xdp_load_stash(xdp, stash);
	libeth_xdp_return_buff_slow(xdp);

	stash->data = NULL;
}
EXPORT_SYMBOL_GPL(__libeth_xdp_return_stash);

/**
 * libeth_xdp_return_buff_slow - free &libeth_xdp_buff
 * @xdp: buffer to free/return
 *
 * Slowpath version of libeth_xdp_return_buff() to be called on exceptions,
 * queue clean-ups etc., without unwanted inlining.
 */
void __cold libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp)
{
	__libeth_xdp_return_buff(xdp, false);
}
EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_slow);

/**
 * libeth_xdp_buff_add_frag - add a frag to an XDP buffer
 * @xdp: head XDP buffer
 * @fqe: Rx buffer containing the frag
 * @len: frag length reported by HW
 *
 * External helper used by libeth_xdp_process_buff(), do not call directly.
 * Frees both the head and frag buffers on error.
 *
 * Return: true on success, false on error (no space for a new frag).
 */
bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
			      const struct libeth_fqe *fqe,
			      u32 len)
{
	netmem_ref netmem = fqe->netmem;

	if (!xdp_buff_add_frag(&xdp->base, netmem,
			       fqe->offset + netmem_get_pp(netmem)->p.offset,
			       len, fqe->truesize))
		goto recycle;

	return true;

recycle:
	libeth_rx_recycle_slow(netmem);
	libeth_xdp_return_buff_slow(xdp);

	return false;
}
EXPORT_SYMBOL_GPL(libeth_xdp_buff_add_frag);

/**
 * libeth_xdp_prog_exception - handle XDP prog exceptions
 * @bq: XDP Tx bulk
 * @xdp: buffer to process
 * @act: original XDP prog verdict
 * @ret: error code if redirect failed
 *
 * External helper used by __libeth_xdp_run_prog() and
 * __libeth_xsk_run_prog_slow(), do not call directly.
 * Reports an invalid @act, emits an XDP exception trace event, and frees
 * the buffer.
 *
 * Return: libeth_xdp XDP prog verdict.
 */
u32 __cold libeth_xdp_prog_exception(const struct libeth_xdp_tx_bulk *bq,
				     struct libeth_xdp_buff *xdp,
				     enum xdp_action act, int ret)
{
	if (act > XDP_REDIRECT)
		bpf_warn_invalid_xdp_action(bq->dev, bq->prog, act);

	libeth_trace_xdp_exception(bq->dev, bq->prog, act);

	if (xdp->base.rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
		return libeth_xsk_prog_exception(xdp, act, ret);

	libeth_xdp_return_buff_slow(xdp);

	return LIBETH_XDP_DROP;
}
EXPORT_SYMBOL_GPL(libeth_xdp_prog_exception);

/* Tx buffer completion */

static void libeth_xdp_put_netmem_bulk(netmem_ref netmem,
				       struct xdp_frame_bulk *bq)
{
	if (unlikely(bq->count == XDP_BULK_QUEUE_SIZE))
		xdp_flush_frame_bulk(bq);

	bq->q[bq->count++] = netmem;
}

/**
 * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
 * @sinfo: shared info corresponding to the buffer
 * @bq: XDP frame bulk to store the buffer
 * @frags: whether the buffer has frags
 *
 * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff, speeds up Tx
 * completion of ``XDP_TX`` buffers and allows freeing them in the same
 * bulks as &xdp_frame buffers.
 */
void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
				 struct xdp_frame_bulk *bq, bool frags)
{
	if (!frags)
		goto head;

	for (u32 i = 0; i < sinfo->nr_frags; i++)
		libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]),
					   bq);

head:
	libeth_xdp_put_netmem_bulk(virt_to_netmem(sinfo), bq);
}
EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_bulk);

/* Misc */

/**
 * libeth_xdp_queue_threshold - calculate XDP queue clean/refill threshold
 * @count: number of descriptors in the queue
 *
 * The threshold is the limit at which RQs start to refill (when the number
 * of empty buffers exceeds it) and SQs get cleaned up (when the number of
 * free descriptors goes below it). To speed up hotpath processing, the
 * threshold is always a power of two, closest to 1/4 of the queue length.
 * Don't call this on the hotpath; calculate and cache the threshold during
 * queue initialization.
 *
 * Return: the calculated threshold.
 */
u32 libeth_xdp_queue_threshold(u32 count)
{
	u32 quarter, low, high;

	if (likely(is_power_of_2(count)))
		return count >> 2;

	quarter = DIV_ROUND_CLOSEST(count, 4);
	low = rounddown_pow_of_two(quarter);
	high = roundup_pow_of_two(quarter);

	return high - quarter <= quarter - low ? high : low;
}
EXPORT_SYMBOL_GPL(libeth_xdp_queue_threshold);
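
/*
 * Worked example (illustrative only): for count == 1024, the count is a
 * power of two, so the threshold is simply 1024 / 4 == 256. For
 * count == 1000, the quarter is 250; the surrounding powers of two are 128
 * and 256, and 256 is the closer one, so the threshold becomes 256.
 */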

/**
 * __libeth_xdp_set_features - set XDP features for netdev
 * @dev: &net_device to configure
 * @xmo: XDP metadata ops (Rx hints)
 * @zc_segs: maximum number of S/G frags the HW can transmit
 * @tmo: XSk Tx metadata ops (Tx hints)
 *
 * Set all the features libeth_xdp supports. Only the first argument is
 * required; if the third one (@zc_segs) is zero, XSk support won't be
 * advertised. Use the non-underscored versions in drivers instead.
 */
void __libeth_xdp_set_features(struct net_device *dev,
			       const struct xdp_metadata_ops *xmo,
			       u32 zc_segs,
			       const struct xsk_tx_metadata_ops *tmo)
{
	xdp_set_features_flag(dev,
			      NETDEV_XDP_ACT_BASIC |
			      NETDEV_XDP_ACT_REDIRECT |
			      NETDEV_XDP_ACT_NDO_XMIT |
			      (zc_segs ? NETDEV_XDP_ACT_XSK_ZEROCOPY : 0) |
			      NETDEV_XDP_ACT_RX_SG |
			      NETDEV_XDP_ACT_NDO_XMIT_SG);
	dev->xdp_metadata_ops = xmo;

	tmo = tmo == libeth_xsktmo ? &libeth_xsktmo_slow : tmo;

	dev->xdp_zc_max_segs = zc_segs ? : 1;
	dev->xsk_tx_metadata_ops = zc_segs ? tmo : NULL;
}
EXPORT_SYMBOL_GPL(__libeth_xdp_set_features);
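
/*
 * A minimal usage sketch (not part of the library), assuming hypothetical
 * driver symbols &my_xdp_metadata_ops and MY_XSK_MAX_SEGS. A driver that
 * advertises XSk zero-copy with S/G support would typically call the
 * non-underscored wrapper from its probe path, which boils down to:
 *
 *	__libeth_xdp_set_features(netdev, &my_xdp_metadata_ops,
 *				  MY_XSK_MAX_SEGS, libeth_xsktmo);
 *
 * Passing zero as @zc_segs leaves XSk zero-copy and the XSk Tx metadata
 * ops unadvertised.
 */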

/**
 * libeth_xdp_set_redirect - toggle the XDP redirect feature
 * @dev: &net_device to configure
 * @enable: whether XDP is enabled
 *
 * Use this to dynamically enable and disable the redirect feature when
 * XDPSQs are not always available.
 */
void libeth_xdp_set_redirect(struct net_device *dev, bool enable)
{
	if (enable)
		xdp_features_set_redirect_target(dev, true);
	else
		xdp_features_clear_redirect_target(dev);
}
EXPORT_SYMBOL_GPL(libeth_xdp_set_redirect);
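
/*
 * A minimal usage sketch with a hypothetical driver variable @prog: a
 * driver that only allocates XDPSQs while an XDP program is attached could
 * toggle the feature from its XDP setup/teardown path:
 *
 *	libeth_xdp_set_redirect(netdev, prog != NULL);
 */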

/* Module */

static const struct libeth_xdp_ops xdp_ops __initconst = {
	.bulk = libeth_xdp_return_buff_bulk,
	.xsk = libeth_xsk_buff_free_slow,
};

static int __init libeth_xdp_module_init(void)
{
	libeth_attach_xdp(&xdp_ops);

	return 0;
}
module_init(libeth_xdp_module_init);

static void __exit libeth_xdp_module_exit(void)
{
	libeth_detach_xdp();
}
module_exit(libeth_xdp_module_exit);

MODULE_DESCRIPTION("Common Ethernet library - XDP infra");
MODULE_IMPORT_NS("LIBETH");
MODULE_LICENSE("GPL");