// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#define DEFAULT_SYMBOL_NAMESPACE	"LIBETH_XDP"

#include <linux/export.h>

#include <net/libeth/xdp.h>

#include "priv.h"

/* XDPSQ sharing */

DEFINE_STATIC_KEY_FALSE(libeth_xdpsq_share);
EXPORT_SYMBOL_GPL(libeth_xdpsq_share);

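/**
 * __libeth_xdpsq_get - mark an XDPSQ as shared and enable the static branch
 * @lock: XDPSQ lock to initialize
 * @dev: &net_device the queue belongs to (used for the warning message)
 *
 * Initialize the spinlock, flag the queue as shared, and take a reference
 * on the &libeth_xdpsq_share static branch. Warns (ratelimited) when sharing
 * gets enabled for the first time, as it may slow down XDP Tx.
 */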
void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
			const struct net_device *dev)
{
	bool warn;

	spin_lock_init(&lock->lock);
	lock->share = true;

	warn = !static_key_enabled(&libeth_xdpsq_share);
	static_branch_inc(&libeth_xdpsq_share);

	if (warn && net_ratelimit())
		netdev_warn(dev, "XDPSQ sharing enabled, possible XDP Tx slowdown\n");
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_get);

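/**
 * __libeth_xdpsq_put - drop the XDPSQ sharing reference for a queue
 * @lock: XDPSQ lock to "unshare"
 * @dev: &net_device the queue belongs to (used for the notice message)
 *
 * Release the reference on the &libeth_xdpsq_share static branch and clear
 * the sharing flag. Prints a ratelimited notice once the last shared queue
 * is gone.
 */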
void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
			const struct net_device *dev)
{
	static_branch_dec(&libeth_xdpsq_share);

	if (!static_key_enabled(&libeth_xdpsq_share) && net_ratelimit())
		netdev_notice(dev, "XDPSQ sharing disabled\n");

	lock->share = false;
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_put);

void __acquires(&lock->lock)
__libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
{
	spin_lock(&lock->lock);
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_lock);

void __releases(&lock->lock)
__libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
{
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL_GPL(__libeth_xdpsq_unlock);

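/*
 * Example, not part of the upstream file: a minimal sketch of how a driver
 * could use the sharing helpers above. The my_xdpsq structure and function
 * names are hypothetical; drivers normally go through the non-underscored
 * wrappers from <net/libeth/xdp.h> rather than calling these exports
 * directly.
 */
#if 0	/* illustration only */
struct my_xdpsq {
	struct libeth_xdpsq_lock	lock;
	/* descriptor ring, tail pointer etc. */
};

/* Queue config time: several NAPI vectors will Tx through this XDPSQ */
static void my_xdpsq_enable_sharing(struct my_xdpsq *sq,
				    const struct net_device *dev)
{
	__libeth_xdpsq_get(&sq->lock, dev);
}

/* Hotpath: serialize descriptor posting on a shared queue */
static void my_xdpsq_xmit(struct my_xdpsq *sq)
{
	__libeth_xdpsq_lock(&sq->lock);
	/* post descriptors, bump the tail */
	__libeth_xdpsq_unlock(&sq->lock);
}

/* Queue teardown */
static void my_xdpsq_disable_sharing(struct my_xdpsq *sq,
				     const struct net_device *dev)
{
	__libeth_xdpsq_put(&sq->lock, dev);
}
#endif
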
/* XDPSQ clean-up timers */

/**
 * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer
 * @timer: timer to initialize
 * @xdpsq: queue this timer belongs to
 * @lock: corresponding XDPSQ lock
 * @poll: queue polling/completion function
 *
 * XDPSQ clean-up timers must be set up at queue configuration time, before
 * the queue is used. Set the required pointers and the cleaning callback.
 */
void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
			     struct libeth_xdpsq_lock *lock,
			     void (*poll)(struct work_struct *work))
{
	timer->xdpsq = xdpsq;
	timer->lock = lock;

	INIT_DELAYED_WORK(&timer->dwork, poll);
}
EXPORT_SYMBOL_GPL(libeth_xdpsq_init_timer);

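/*
 * Example, not part of the upstream file: a hypothetical driver setting up
 * its clean-up timer at queue configuration time. It reuses the my_xdpsq
 * sketch above, assumed to be extended with timer and lock members;
 * my_xdpsq_clean() and the arming policy are assumptions as well. Only
 * libeth_xdpsq_init_timer() and the &libeth_xdpsq_timer layout come from
 * libeth_xdp itself.
 */
#if 0	/* illustration only */
static void my_xdpsq_clean_work(struct work_struct *work)
{
	struct libeth_xdpsq_timer *timer = container_of(work, typeof(*timer),
							dwork.work);
	struct my_xdpsq *sq = timer->xdpsq;

	/* complete descriptors, then rearm the timer if needed */
	my_xdpsq_clean(sq);
}

static void my_xdpsq_cfg(struct my_xdpsq *sq)
{
	libeth_xdpsq_init_timer(&sq->timer, sq, &sq->lock,
				my_xdpsq_clean_work);
}
#endif
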
/* ``XDP_TX`` bulking */

static void __cold
libeth_xdp_tx_return_one(const struct libeth_xdp_tx_frame *frm)
{
	if (frm->len_fl & LIBETH_XDP_TX_MULTI)
		libeth_xdp_return_frags(frm->data + frm->soff, true);

	libeth_xdp_return_va(frm->data, true);
}

static void __cold
libeth_xdp_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, u32 count)
{
	for (u32 i = 0; i < count; i++) {
		const struct libeth_xdp_tx_frame *frm = &bq[i];

		if (!(frm->len_fl & LIBETH_XDP_TX_FIRST))
			continue;

		libeth_xdp_tx_return_one(frm);
	}
}

static void __cold libeth_trace_xdp_exception(const struct net_device *dev,
					      const struct bpf_prog *prog,
					      u32 act)
{
	trace_xdp_exception(dev, prog, act);
}

/**
 * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames
 * @bq: XDP Tx frame bulk
 * @sent: number of frames sent successfully (from this bulk)
 * @flags: internal libeth_xdp flags (XSk, .ndo_xdp_xmit etc.)
 *
 * Cold helper used by __libeth_xdp_tx_flush_bulk(), do not call directly.
 * Reports XDP Tx exceptions, then either frees the frames that won't be
 * sent or adjusts the Tx bulk so they can be retried later.
 */
void __cold libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
				    u32 flags)
{
	const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent];
	u32 left = bq->count - sent;

	if (!(flags & LIBETH_XDP_TX_NDO))
		libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX);

	if (!(flags & LIBETH_XDP_TX_DROP)) {
		memmove(bq->bulk, pos, left * sizeof(*bq->bulk));
		bq->count = left;

		return;
	}

	if (flags & LIBETH_XDP_TX_XSK)
		libeth_xsk_tx_return_bulk(pos, left);
	else if (!(flags & LIBETH_XDP_TX_NDO))
		libeth_xdp_tx_return_bulk(pos, left);
	else
		libeth_xdp_xmit_return_bulk(pos, left, bq->dev);

	bq->count = 0;
}
EXPORT_SYMBOL_GPL(libeth_xdp_tx_exception);

/* .ndo_xdp_xmit() implementation */

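/**
 * libeth_xdp_xmit_return_bulk - unmap a bulk of ``.ndo_xdp_xmit`` frames
 * @bq: start of the frame bulk to release
 * @count: number of Tx descriptor entries in the bulk
 * @dev: &net_device the frames were DMA-mapped for
 *
 * DMA-unmap every entry of the bulk. The xdp_frames themselves are not
 * freed here; the core takes care of that for ``.ndo_xdp_xmit``.
 *
 * Return: number of full frames (head fragments) contained in the bulk.
 */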
u32 __cold libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
				       u32 count, const struct net_device *dev)
{
	u32 n = 0;

	for (u32 i = 0; i < count; i++) {
		const struct libeth_xdp_tx_frame *frm = &bq[i];
		dma_addr_t dma;

		if (frm->flags & LIBETH_XDP_TX_FIRST)
			dma = *libeth_xdp_xmit_frame_dma(frm->xdpf);
		else
			dma = dma_unmap_addr(frm, dma);

		dma_unmap_page(dev->dev.parent, dma, dma_unmap_len(frm, len),
			       DMA_TO_DEVICE);

		/* Actual xdp_frames are freed by the core */
		n += !!(frm->flags & LIBETH_XDP_TX_FIRST);
	}

	return n;
}
EXPORT_SYMBOL_GPL(libeth_xdp_xmit_return_bulk);

/* Rx polling path */

/**
 * libeth_xdp_load_stash - recreate an &xdp_buff from a libeth_xdp buffer stash
 * @dst: target &libeth_xdp_buff to initialize
 * @src: source stash
 *
 * External helper used by libeth_xdp_init_buff(), do not call directly.
 * Recreate an on-stack &libeth_xdp_buff using the stash saved earlier.
 * The only field left untouched (rxq) is initialized later by the
 * above-mentioned function.
 */
void libeth_xdp_load_stash(struct libeth_xdp_buff *dst,
			   const struct libeth_xdp_buff_stash *src)
{
	dst->data = src->data;
	dst->base.data_end = src->data + src->len;
	dst->base.data_meta = src->data;
	dst->base.data_hard_start = src->data - src->headroom;

	dst->base.frame_sz = src->frame_sz;
	dst->base.flags = src->flags;
}
EXPORT_SYMBOL_GPL(libeth_xdp_load_stash);

/**
 * libeth_xdp_save_stash - convert &xdp_buff to a libeth_xdp buffer stash
 * @dst: target &libeth_xdp_buff_stash to initialize
 * @src: source XDP buffer
 *
 * External helper used by libeth_xdp_save_buff(), do not call directly.
 * Use the fields from the passed XDP buffer to initialize the stash on the
 * queue, so that a partially received frame can be finished later during
 * the next NAPI poll.
 */
void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst,
			   const struct libeth_xdp_buff *src)
{
	dst->data = src->data;
	dst->headroom = src->data - src->base.data_hard_start;
	dst->len = src->base.data_end - src->data;

	dst->frame_sz = src->base.frame_sz;
	dst->flags = src->base.flags;

	WARN_ON_ONCE(dst->flags != src->base.flags);
}
EXPORT_SYMBOL_GPL(libeth_xdp_save_stash);

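/**
 * __libeth_xdp_return_stash - free a stashed partially-built buffer
 * @stash: queue stash containing the buffer to free
 *
 * Recreate the on-stack &libeth_xdp_buff from the stash, free it via the
 * slowpath, and mark the stash as empty.
 */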
void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash)
{
	LIBETH_XDP_ONSTACK_BUFF(xdp);

	libeth_xdp_load_stash(xdp, stash);
	libeth_xdp_return_buff_slow(xdp);

	stash->data = NULL;
}
EXPORT_SYMBOL_GPL(__libeth_xdp_return_stash);

/**
 * libeth_xdp_return_buff_slow - free &libeth_xdp_buff
 * @xdp: buffer to free/return
 *
 * Slowpath version of libeth_xdp_return_buff() to be called on exceptions,
 * queue clean-ups etc., without unwanted inlining.
 */
void __cold libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp)
{
	__libeth_xdp_return_buff(xdp, false);
}
EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_slow);

/**
 * libeth_xdp_buff_add_frag - add a frag to an XDP buffer
 * @xdp: head XDP buffer
 * @fqe: Rx buffer containing the frag
 * @len: frag length reported by HW
 *
 * External helper used by libeth_xdp_process_buff(), do not call directly.
 * Frees both the head and the frag buffers on error.
 *
 * Return: true on success, false on error (no space for a new frag).
 */
bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
			      const struct libeth_fqe *fqe,
			      u32 len)
{
	netmem_ref netmem = fqe->netmem;

	if (!xdp_buff_add_frag(&xdp->base, netmem,
			       fqe->offset + netmem_get_pp(netmem)->p.offset,
			       len, fqe->truesize))
		goto recycle;

	return true;

recycle:
	libeth_rx_recycle_slow(netmem);
	libeth_xdp_return_buff_slow(xdp);

	return false;
}
EXPORT_SYMBOL_GPL(libeth_xdp_buff_add_frag);

/**
 * libeth_xdp_prog_exception - handle XDP prog exceptions
 * @bq: XDP Tx bulk
 * @xdp: buffer to process
 * @act: original XDP prog verdict
 * @ret: error code if redirect failed
 *
 * External helper used by __libeth_xdp_run_prog(), do not call directly.
 * Reports invalid @act values, emits the XDP exception trace event, and
 * frees the buffer.
 *
 * Return: libeth_xdp XDP prog verdict.
 */
u32 __cold libeth_xdp_prog_exception(const struct libeth_xdp_tx_bulk *bq,
				     struct libeth_xdp_buff *xdp,
				     enum xdp_action act, int ret)
{
	if (act > XDP_REDIRECT)
		bpf_warn_invalid_xdp_action(bq->dev, bq->prog, act);

	libeth_trace_xdp_exception(bq->dev, bq->prog, act);

	libeth_xdp_return_buff_slow(xdp);

	return LIBETH_XDP_DROP;
}
EXPORT_SYMBOL_GPL(libeth_xdp_prog_exception);

/* Tx buffer completion */

static void libeth_xdp_put_netmem_bulk(netmem_ref netmem,
				       struct xdp_frame_bulk *bq)
{
	if (unlikely(bq->count == XDP_BULK_QUEUE_SIZE))
		xdp_flush_frame_bulk(bq);

	bq->q[bq->count++] = netmem;
}

/**
 * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
 * @sinfo: shared info corresponding to the buffer
 * @bq: XDP frame bulk to store the buffer
 * @frags: whether the buffer has frags
 *
 * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff: speeds up Tx
 * completion of ``XDP_TX`` buffers and allows freeing them in the same bulks
 * as &xdp_frame buffers.
 */
void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
				 struct xdp_frame_bulk *bq, bool frags)
{
	if (!frags)
		goto head;

	for (u32 i = 0; i < sinfo->nr_frags; i++)
		libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]),
					   bq);

head:
	libeth_xdp_put_netmem_bulk(virt_to_netmem(sinfo), bq);
}
EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_bulk);

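/*
 * Example, not part of the upstream file: a sketch of a Tx completion loop
 * freeing ``XDP_TX`` buffers in bulk. my_sqe, its fields and
 * my_xdpsq_next_done() are hypothetical; how a driver gets to the
 * &skb_shared_info of a completed buffer is driver-specific. Only
 * libeth_xdp_return_buff_bulk() and the generic xdp_frame_bulk helpers are
 * real APIs here.
 */
#if 0	/* illustration only */
static void my_xdpsq_complete(struct my_xdpsq *sq, u32 done)
{
	struct xdp_frame_bulk bq;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock();	/* the frame-bulk API has traditionally been
				 * used under the RCU read lock
				 */

	for (u32 i = 0; i < done; i++) {
		const struct my_sqe *sqe = my_xdpsq_next_done(sq);

		libeth_xdp_return_buff_bulk(sqe->sinfo, &bq, sqe->has_frags);
	}

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();
}
#endif
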
/* Misc */

/**
 * libeth_xdp_queue_threshold - calculate the XDP queue clean/refill threshold
 * @count: number of descriptors in the queue
 *
 * The threshold is the limit at which RQs start to refill (when the number of
 * empty buffers exceeds it) and SQs get cleaned up (when the number of free
 * descriptors goes below it). To speed up hotpath processing, the threshold
 * is always a power of two, the one closest to 1/4 of the queue length.
 * Don't call this on the hotpath; calculate and cache the threshold during
 * queue initialization.
 *
 * Return: the calculated threshold.
 */
u32 libeth_xdp_queue_threshold(u32 count)
{
	u32 quarter, low, high;

	if (likely(is_power_of_2(count)))
		return count >> 2;

	quarter = DIV_ROUND_CLOSEST(count, 4);
	low = rounddown_pow_of_two(quarter);
	high = roundup_pow_of_two(quarter);

	return high - quarter <= quarter - low ? high : low;
}
EXPORT_SYMBOL_GPL(libeth_xdp_queue_threshold);

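/*
 * Example, not part of the upstream file: caching the threshold at ring init
 * so that the hotpath only compares against a precomputed value. my_xdpsq,
 * its fields and my_xdpsq_free_descs() are hypothetical.
 */
#if 0	/* illustration only */
static void my_xdpsq_init(struct my_xdpsq *sq, u32 desc_count)
{
	sq->desc_count = desc_count;
	/* e.g. 384 descriptors -> the power of two closest to 96 is 128 */
	sq->thresh = libeth_xdp_queue_threshold(desc_count);
}

static bool my_xdpsq_needs_clean(const struct my_xdpsq *sq)
{
	return my_xdpsq_free_descs(sq) < sq->thresh;
}
#endif
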
/**
 * __libeth_xdp_set_features - set XDP features for netdev
 * @dev: &net_device to configure
 * @xmo: XDP metadata ops (Rx hints)
 *
 * Set all the features libeth_xdp supports. Only the first argument is
 * necessary.
 * Use the non-underscored versions in drivers instead.
 */
void __libeth_xdp_set_features(struct net_device *dev,
			       const struct xdp_metadata_ops *xmo)
{
	xdp_set_features_flag(dev,
			      NETDEV_XDP_ACT_BASIC |
			      NETDEV_XDP_ACT_REDIRECT |
			      NETDEV_XDP_ACT_NDO_XMIT |
			      NETDEV_XDP_ACT_RX_SG |
			      NETDEV_XDP_ACT_NDO_XMIT_SG);
	dev->xdp_metadata_ops = xmo;
}
EXPORT_SYMBOL_GPL(__libeth_xdp_set_features);

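/*
 * Example, not part of the upstream file: advertising the libeth_xdp feature
 * set at probe time. my_xmo_ops is a hypothetical &xdp_metadata_ops instance;
 * pass NULL if the driver does not implement Rx hints. Drivers normally go
 * through the non-underscored wrappers, this only shows what they boil down
 * to.
 */
#if 0	/* illustration only */
static void my_probe_set_xdp_features(struct net_device *dev)
{
	__libeth_xdp_set_features(dev, &my_xmo_ops);
}
#endif
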
/**
 * libeth_xdp_set_redirect - toggle the XDP redirect feature
 * @dev: &net_device to configure
 * @enable: whether XDP is enabled
 *
 * Use this when XDPSQs are not always available, to dynamically enable
 * and disable the redirect feature.
 */
void libeth_xdp_set_redirect(struct net_device *dev, bool enable)
{
	if (enable)
		xdp_features_set_redirect_target(dev, true);
	else
		xdp_features_clear_redirect_target(dev);
}
EXPORT_SYMBOL_GPL(libeth_xdp_set_redirect);

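/*
 * Example, not part of the upstream file: toggling the redirect target flag
 * in a hypothetical driver where XDPSQs only exist while an XDP program is
 * attached.
 */
#if 0	/* illustration only */
static void my_xdp_prog_change(struct net_device *dev, bool prog_attached)
{
	libeth_xdp_set_redirect(dev, prog_attached);
}
#endif
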
/* Module */

static const struct libeth_xdp_ops xdp_ops __initconst = {
	.bulk	= libeth_xdp_return_buff_bulk,
	.xsk	= libeth_xsk_buff_free_slow,
};

static int __init libeth_xdp_module_init(void)
{
	libeth_attach_xdp(&xdp_ops);

	return 0;
}
module_init(libeth_xdp_module_init);

static void __exit libeth_xdp_module_exit(void)
{
	libeth_detach_xdp();
}
module_exit(libeth_xdp_module_exit);

MODULE_DESCRIPTION("Common Ethernet library - XDP infra");
MODULE_IMPORT_NS("LIBETH");
MODULE_LICENSE("GPL");