xref: /linux/drivers/net/ethernet/intel/libeth/xdp.c (revision 576cc5c13d9ba53a1a24d9b34af2f939a87b7ce8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2025 Intel Corporation */
3 
4 #define DEFAULT_SYMBOL_NAMESPACE	"LIBETH_XDP"
5 
6 #include <linux/export.h>
7 
8 #include <net/libeth/xdp.h>
9 
10 #include "priv.h"
11 
12 /* XDPSQ sharing */
13 
14 DEFINE_STATIC_KEY_FALSE(libeth_xdpsq_share);
15 EXPORT_SYMBOL_GPL(libeth_xdpsq_share);
16 
17 void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
18 			const struct net_device *dev)
19 {
20 	bool warn;
21 
22 	spin_lock_init(&lock->lock);
23 	lock->share = true;
24 
25 	warn = !static_key_enabled(&libeth_xdpsq_share);
26 	static_branch_inc(&libeth_xdpsq_share);
27 
28 	if (warn && net_ratelimit())
29 		netdev_warn(dev, "XDPSQ sharing enabled, possible XDP Tx slowdown\n");
30 }
31 EXPORT_SYMBOL_GPL(__libeth_xdpsq_get);
32 
33 void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
34 			const struct net_device *dev)
35 {
36 	static_branch_dec(&libeth_xdpsq_share);
37 
38 	if (!static_key_enabled(&libeth_xdpsq_share) && net_ratelimit())
39 		netdev_notice(dev, "XDPSQ sharing disabled\n");
40 
41 	lock->share = false;
42 }
43 EXPORT_SYMBOL_GPL(__libeth_xdpsq_put);
44 
45 void __acquires(&lock->lock)
46 __libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
47 {
48 	spin_lock(&lock->lock);
49 }
50 EXPORT_SYMBOL_GPL(__libeth_xdpsq_lock);
51 
52 void __releases(&lock->lock)
53 __libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
54 {
55 	spin_unlock(&lock->lock);
56 }
57 EXPORT_SYMBOL_GPL(__libeth_xdpsq_unlock);
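/*
 * A minimal usage sketch of XDPSQ sharing, assuming a hypothetical driver
 * whose "struct my_xdpsq" embeds a &libeth_xdpsq_lock; the queue layout and
 * the point where sharing becomes necessary (typically fewer XDPSQs than
 * CPUs) are driver-specific, and the non-underscored wrappers from
 * <net/libeth/xdp.h> are normally used instead of these low-level helpers.
 *
 *	// queue configuration: more CPUs than XDPSQs -> enable sharing
 *	if (num_possible_cpus() > num_xdpsqs)
 *		__libeth_xdpsq_get(&sq->lock, dev);
 *
 *	// hot path: serialize descriptor writes on a shared queue
 *	__libeth_xdpsq_lock(&sq->lock);
 *	// ... fill Tx descriptors, bump the tail ...
 *	__libeth_xdpsq_unlock(&sq->lock);
 *
 *	// queue teardown
 *	__libeth_xdpsq_put(&sq->lock, dev);
 */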
58 
59 /* XDPSQ clean-up timers */
60 
61 /**
62  * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer
63  * @timer: timer to initialize
64  * @xdpsq: queue this timer belongs to
65  * @lock: corresponding XDPSQ lock
66  * @poll: queue polling/completion function
67  *
68  * XDPSQ clean-up timers must be set up at queue configuration time, before
69  * the queue is used. Set the required pointers and the cleaning callback.
70  */
71 void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
72 			     struct libeth_xdpsq_lock *lock,
73 			     void (*poll)(struct work_struct *work))
74 {
75 	timer->xdpsq = xdpsq;
76 	timer->lock = lock;
77 
78 	INIT_DELAYED_WORK(&timer->dwork, poll);
79 }
80 EXPORT_SYMBOL_GPL(libeth_xdpsq_init_timer);
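/*
 * A minimal sketch of the timer setup, assuming a hypothetical driver queue
 * "struct my_xdpsq" that embeds a &libeth_xdpsq_timer. The completion logic
 * inside the callback is driver-specific; only the container_of() plumbing
 * and the init call follow from the structure used in this file.
 *
 *	static void my_xdpsq_poll(struct work_struct *work)
 *	{
 *		struct libeth_xdpsq_timer *timer;
 *
 *		timer = container_of(work, typeof(*timer), dwork.work);
 *		// clean completed descriptors of timer->xdpsq under timer->lock
 *	}
 *
 *	// at queue configuration time
 *	libeth_xdpsq_init_timer(&sq->timer, sq, &sq->lock, my_xdpsq_poll);
 */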
81 
82 /* ``XDP_TX`` bulking */
83 
84 static void __cold
85 libeth_xdp_tx_return_one(const struct libeth_xdp_tx_frame *frm)
86 {
87 	if (frm->len_fl & LIBETH_XDP_TX_MULTI)
88 		libeth_xdp_return_frags(frm->data + frm->soff, true);
89 
90 	libeth_xdp_return_va(frm->data, true);
91 }
92 
93 static void __cold
94 libeth_xdp_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, u32 count)
95 {
96 	for (u32 i = 0; i < count; i++) {
97 		const struct libeth_xdp_tx_frame *frm = &bq[i];
98 
99 		if (!(frm->len_fl & LIBETH_XDP_TX_FIRST))
100 			continue;
101 
102 		libeth_xdp_tx_return_one(frm);
103 	}
104 }
105 
106 static void __cold libeth_trace_xdp_exception(const struct net_device *dev,
107 					      const struct bpf_prog *prog,
108 					      u32 act)
109 {
110 	trace_xdp_exception(dev, prog, act);
111 }
112 
113 /**
114  * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames
115  * @bq: XDP Tx frame bulk
116  * @sent: number of frames sent successfully (from this bulk)
117  * @flags: internal libeth_xdp flags (.ndo_xdp_xmit etc.)
118  *
119  * Cold helper used by __libeth_xdp_tx_flush_bulk(), do not call directly.
120  * Reports XDP Tx exceptions and either frees the frames that won't be sent
121  * or adjusts the Tx bulk to try again later.
122  */
123 void __cold libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
124 				    u32 flags)
125 {
126 	const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent];
127 	u32 left = bq->count - sent;
128 
129 	if (!(flags & LIBETH_XDP_TX_NDO))
130 		libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX);
131 
132 	if (!(flags & LIBETH_XDP_TX_DROP)) {
133 		memmove(bq->bulk, pos, left * sizeof(*bq->bulk));
134 		bq->count = left;
135 
136 		return;
137 	}
138 
139 	if (!(flags & LIBETH_XDP_TX_NDO))
140 		libeth_xdp_tx_return_bulk(pos, left);
141 	else
142 		libeth_xdp_xmit_return_bulk(pos, left, bq->dev);
143 
144 	bq->count = 0;
145 }
146 EXPORT_SYMBOL_GPL(libeth_xdp_tx_exception);
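/*
 * A worked example of the bookkeeping above (numbers are illustrative only):
 * with bq->count == 8 and sent == 5, the three unsent frames are moved to the
 * head of bq->bulk and bq->count becomes 3, so the caller can retry them
 * later; with LIBETH_XDP_TX_DROP set, they are freed instead and the count is
 * zeroed.
 */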
147 
148 /* .ndo_xdp_xmit() implementation */
149 
150 u32 __cold libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
151 				       u32 count, const struct net_device *dev)
152 {
153 	u32 n = 0;
154 
155 	for (u32 i = 0; i < count; i++) {
156 		const struct libeth_xdp_tx_frame *frm = &bq[i];
157 		dma_addr_t dma;
158 
159 		if (frm->flags & LIBETH_XDP_TX_FIRST)
160 			dma = *libeth_xdp_xmit_frame_dma(frm->xdpf);
161 		else
162 			dma = dma_unmap_addr(frm, dma);
163 
164 		dma_unmap_page(dev->dev.parent, dma, dma_unmap_len(frm, len),
165 			       DMA_TO_DEVICE);
166 
167 		/* Actual xdp_frames are freed by the core */
168 		n += !!(frm->flags & LIBETH_XDP_TX_FIRST);
169 	}
170 
171 	return n;
172 }
173 EXPORT_SYMBOL_GPL(libeth_xdp_xmit_return_bulk);
174 
175 /* Rx polling path */
176 
177 /**
178  * libeth_xdp_load_stash - recreate an &xdp_buff from libeth_xdp buffer stash
179  * @dst: target &libeth_xdp_buff to initialize
180  * @src: source stash
181  *
182  * External helper used by libeth_xdp_init_buff(), do not call directly.
183  * Recreate an on-stack &libeth_xdp_buff using the stash saved earlier.
184  * The only untouched field (rxq) is initialized later in the
185  * above-mentioned function.
186  */
187 void libeth_xdp_load_stash(struct libeth_xdp_buff *dst,
188 			   const struct libeth_xdp_buff_stash *src)
189 {
190 	dst->data = src->data;
191 	dst->base.data_end = src->data + src->len;
192 	dst->base.data_meta = src->data;
193 	dst->base.data_hard_start = src->data - src->headroom;
194 
195 	dst->base.frame_sz = src->frame_sz;
196 	dst->base.flags = src->flags;
197 }
198 EXPORT_SYMBOL_GPL(libeth_xdp_load_stash);
199 
200 /**
201  * libeth_xdp_save_stash - convert &xdp_buff to a libeth_xdp buffer stash
202  * @dst: target &libeth_xdp_buff_stash to initialize
203  * @src: source XDP buffer
204  *
205  * External helper used by libeth_xdp_save_buff(), do not call directly.
206  * Use the fields from the passed XDP buffer to initialize the stash on the
207  * queue, so that a partially received frame can be finished later during
208  * the next NAPI poll.
209  */
210 void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst,
211 			   const struct libeth_xdp_buff *src)
212 {
213 	dst->data = src->data;
214 	dst->headroom = src->data - src->base.data_hard_start;
215 	dst->len = src->base.data_end - src->data;
216 
217 	dst->frame_sz = src->base.frame_sz;
218 	dst->flags = src->base.flags;
219 
220 	WARN_ON_ONCE(dst->flags != src->base.flags);
221 }
222 EXPORT_SYMBOL_GPL(libeth_xdp_save_stash);
223 
224 void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash)
225 {
226 	LIBETH_XDP_ONSTACK_BUFF(xdp);
227 
228 	libeth_xdp_load_stash(xdp, stash);
229 	libeth_xdp_return_buff_slow(xdp);
230 
231 	stash->data = NULL;
232 }
233 EXPORT_SYMBOL_GPL(__libeth_xdp_return_stash);
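/*
 * A rough sketch of the intended stash flow, assuming a hypothetical Rx queue
 * "struct my_rq" that embeds a &libeth_xdp_buff_stash. Drivers use the
 * libeth_xdp_init_buff()/libeth_xdp_save_buff() wrappers named in the
 * kernel-doc above rather than the load/save helpers directly.
 *
 *	// start of a NAPI poll: an on-stack buff is (re)created, picking up
 *	// a partially received frame from rq->xdp_stash if one was saved
 *	LIBETH_XDP_ONSTACK_BUFF(xdp);
 *
 *	// end of the poll: an incomplete frame is written back to the stash
 *	// so the next poll can continue assembling it
 *
 *	// queue teardown: free whatever is still stashed
 *	if (rq->xdp_stash.data)
 *		__libeth_xdp_return_stash(&rq->xdp_stash);
 */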
234 
235 /**
236  * libeth_xdp_return_buff_slow - free &libeth_xdp_buff
237  * @xdp: buffer to free/return
238  *
239  * Slowpath version of libeth_xdp_return_buff() to be called on exceptions,
240  * queue clean-ups, etc., without unwanted inlining.
241  */
242 void __cold libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp)
243 {
244 	__libeth_xdp_return_buff(xdp, false);
245 }
246 EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_slow);
247 
248 /**
249  * libeth_xdp_buff_add_frag - add frag to XDP buffer
250  * @xdp: head XDP buffer
251  * @fqe: Rx buffer containing the frag
252  * @len: frag length reported by HW
253  *
254  * External helper used by libeth_xdp_process_buff(), do not call directly.
255  * Frees both head and frag buffers on error.
256  *
257  * Return: true on success, false on error (no space for a new frag).
258  */
259 bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
260 			      const struct libeth_fqe *fqe,
261 			      u32 len)
262 {
263 	netmem_ref netmem = fqe->netmem;
264 
265 	if (!xdp_buff_add_frag(&xdp->base, netmem,
266 			       fqe->offset + netmem_get_pp(netmem)->p.offset,
267 			       len, fqe->truesize))
268 		goto recycle;
269 
270 	return true;
271 
272 recycle:
273 	libeth_rx_recycle_slow(netmem);
274 	libeth_xdp_return_buff_slow(xdp);
275 
276 	return false;
277 }
278 EXPORT_SYMBOL_GPL(libeth_xdp_buff_add_frag);
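/*
 * A minimal sketch of where this sits in a multi-buffer Rx flow, assuming a
 * hypothetical per-descriptor loop; "rx_fqe" and "hw_len" are placeholders.
 * libeth_xdp_process_buff() named in the kernel-doc above is the normal
 * entry point and calls this helper only for non-first fragments, so the
 * direct call is shown purely for illustration.
 *
 *	// a continuation descriptor of an already started frame
 *	if (!libeth_xdp_buff_add_frag(xdp, rx_fqe, hw_len))
 *		goto next_frame;	// head + frag were already freed
 */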
279 
280 /**
281  * libeth_xdp_prog_exception - handle XDP prog exceptions
282  * @bq: XDP Tx bulk
283  * @xdp: buffer to process
284  * @act: original XDP prog verdict
285  * @ret: error code if redirect failed
286  *
287  * External helper used by __libeth_xdp_run_prog(), do not call directly.
288  * Reports an invalid @act, traces an XDP exception, and frees the buffer.
289  *
290  * Return: libeth_xdp XDP prog verdict.
291  */
292 u32 __cold libeth_xdp_prog_exception(const struct libeth_xdp_tx_bulk *bq,
293 				     struct libeth_xdp_buff *xdp,
294 				     enum xdp_action act, int ret)
295 {
296 	if (act > XDP_REDIRECT)
297 		bpf_warn_invalid_xdp_action(bq->dev, bq->prog, act);
298 
299 	libeth_trace_xdp_exception(bq->dev, bq->prog, act);
300 
301 	libeth_xdp_return_buff_slow(xdp);
302 
303 	return LIBETH_XDP_DROP;
304 }
305 EXPORT_SYMBOL_GPL(libeth_xdp_prog_exception);
306 
307 /* Tx buffer completion */
308 
309 static void libeth_xdp_put_netmem_bulk(netmem_ref netmem,
310 				       struct xdp_frame_bulk *bq)
311 {
312 	if (unlikely(bq->count == XDP_BULK_QUEUE_SIZE))
313 		xdp_flush_frame_bulk(bq);
314 
315 	bq->q[bq->count++] = netmem;
316 }
317 
318 /**
319  * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
320  * @sinfo: shared info corresponding to the buffer
321  * @bq: XDP frame bulk to store the buffer
322  * @frags: whether the buffer has frags
323  *
324  * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff, speeds up Tx
325  * completion of ``XDP_TX`` buffers and allows to free them in same bulks
326  * with &xdp_frame buffers.
327  */
328 void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
329 				 struct xdp_frame_bulk *bq, bool frags)
330 {
331 	if (!frags)
332 		goto head;
333 
334 	for (u32 i = 0; i < sinfo->nr_frags; i++)
335 		libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]),
336 					   bq);
337 
338 head:
339 	libeth_xdp_put_netmem_bulk(virt_to_netmem(sinfo), bq);
340 }
341 EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_bulk);
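/*
 * A minimal sketch of a Tx completion loop using the bulk above, assuming a
 * hypothetical driver that stores the &skb_shared_info pointer and a "frags"
 * flag with each ``XDP_TX`` descriptor (how that is tracked is
 * driver-specific). The rcu_read_lock() section is the usual requirement of
 * the &xdp_frame_bulk API.
 *
 *	struct xdp_frame_bulk bq;
 *
 *	xdp_frame_bulk_init(&bq);
 *
 *	rcu_read_lock();
 *
 *	for (each completed descriptor) {
 *		if (it was an ``XDP_TX`` buffer)
 *			libeth_xdp_return_buff_bulk(desc->sinfo, &bq,
 *						    desc->frags);
 *		else	// .ndo_xdp_xmit() frame
 *			xdp_return_frame_bulk(desc->xdpf, &bq);
 *	}
 *
 *	xdp_flush_frame_bulk(&bq);
 *
 *	rcu_read_unlock();
 */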
342 
343 /* Misc */
344 
345 /**
346  * libeth_xdp_queue_threshold - calculate XDP queue clean/refill threshold
347  * @count: number of descriptors in the queue
348  *
349  * The threshold is the limit at which RQs start to refill (when the number of
350  * empty buffers exceeds it) and SQs get cleaned up (when the number of free
351  * descriptors goes below it). To speed up hotpath processing, the threshold
352  * is always a power of two, the closest to 1/4 of the queue length.
353  * Don't call this on the hotpath; calculate and cache the threshold during
354  * queue initialization.
355  *
356  * Return: the calculated threshold.
357  */
358 u32 libeth_xdp_queue_threshold(u32 count)
359 {
360 	u32 quarter, low, high;
361 
362 	if (likely(is_power_of_2(count)))
363 		return count >> 2;
364 
365 	quarter = DIV_ROUND_CLOSEST(count, 4);
366 	low = rounddown_pow_of_two(quarter);
367 	high = roundup_pow_of_two(quarter);
368 
369 	return high - quarter <= quarter - low ? high : low;
370 }
371 EXPORT_SYMBOL_GPL(libeth_xdp_queue_threshold);
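/*
 * Worked examples of the rounding above (values are illustrative only):
 * count == 1024 is already a power of two, so the threshold is 1024 / 4 =
 * 256; count == 1000 gives a quarter of 250 with candidates 128 and 256,
 * and 256 is closer, so it wins; count == 1536 gives a quarter of 384 with
 * candidates 256 and 512 equally distant, and the tie goes to the higher
 * power, i.e. 512.
 */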
372 
373 /**
374  * __libeth_xdp_set_features - set XDP features for netdev
375  * @dev: &net_device to configure
376  * @xmo: XDP metadata ops (Rx hints)
377  *
378  * Set all the features libeth_xdp supports. Only the first argument is
379  * required; @xmo is optional.
380  * Drivers should use the non-underscored versions instead.
381  */
382 void __libeth_xdp_set_features(struct net_device *dev,
383 			       const struct xdp_metadata_ops *xmo)
384 {
385 	xdp_set_features_flag(dev,
386 			      NETDEV_XDP_ACT_BASIC |
387 			      NETDEV_XDP_ACT_REDIRECT |
388 			      NETDEV_XDP_ACT_NDO_XMIT |
389 			      NETDEV_XDP_ACT_RX_SG |
390 			      NETDEV_XDP_ACT_NDO_XMIT_SG);
391 	dev->xdp_metadata_ops = xmo;
392 }
393 EXPORT_SYMBOL_GPL(__libeth_xdp_set_features);
394 
395 /**
396  * libeth_xdp_set_redirect - toggle the XDP redirect feature
397  * @dev: &net_device to configure
398  * @enable: whether XDP is enabled
399  *
400  * Use this when XDPSQs are not always available, to dynamically enable
401  * and disable the redirect feature.
402  */
403 void libeth_xdp_set_redirect(struct net_device *dev, bool enable)
404 {
405 	if (enable)
406 		xdp_features_set_redirect_target(dev, true);
407 	else
408 		xdp_features_clear_redirect_target(dev);
409 }
410 EXPORT_SYMBOL_GPL(libeth_xdp_set_redirect);
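/*
 * A minimal sketch of wiring these up, assuming a hypothetical driver;
 * "my_xmo" (the driver's &xdp_metadata_ops) and the point where XDPSQs
 * become available are assumptions, not part of this API.
 *
 *	// at netdev registration time; a NULL @xmo is fine if the driver
 *	// provides no Rx metadata hints
 *	__libeth_xdp_set_features(netdev, &my_xmo);
 *
 *	// later, when XDPSQs get created/destroyed at runtime
 *	libeth_xdp_set_redirect(netdev, have_xdpsqs);
 */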
411 
412 /* Module */
413 
414 static const struct libeth_xdp_ops xdp_ops __initconst = {
415 	.bulk	= libeth_xdp_return_buff_bulk,
416 };
417 
418 static int __init libeth_xdp_module_init(void)
419 {
420 	libeth_attach_xdp(&xdp_ops);
421 
422 	return 0;
423 }
424 module_init(libeth_xdp_module_init);
425 
426 static void __exit libeth_xdp_module_exit(void)
427 {
428 	libeth_detach_xdp();
429 }
430 module_exit(libeth_xdp_module_exit);
431 
432 MODULE_DESCRIPTION("Common Ethernet library - XDP infra");
433 MODULE_IMPORT_NS("LIBETH");
434 MODULE_LICENSE("GPL");
435