xref: /linux/drivers/net/ethernet/intel/libeth/xdp.c (revision 819bbaefeded93df36d71d58d9963d706e6e99e1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2025 Intel Corporation */
3 
4 #define DEFAULT_SYMBOL_NAMESPACE	"LIBETH_XDP"
5 
6 #include <linux/export.h>
7 
8 #include <net/libeth/xdp.h>
9 
10 #include "priv.h"
11 
12 /* XDPSQ sharing */
13 
14 DEFINE_STATIC_KEY_FALSE(libeth_xdpsq_share);
15 EXPORT_SYMBOL_GPL(libeth_xdpsq_share);
16 
17 void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
18 			const struct net_device *dev)
19 {
20 	bool warn;
21 
22 	spin_lock_init(&lock->lock);
23 	lock->share = true;
24 
25 	warn = !static_key_enabled(&libeth_xdpsq_share);
26 	static_branch_inc(&libeth_xdpsq_share);
27 
28 	if (warn && net_ratelimit())
29 		netdev_warn(dev, "XDPSQ sharing enabled, possible XDP Tx slowdown\n");
30 }
31 EXPORT_SYMBOL_GPL(__libeth_xdpsq_get);
32 
33 void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
34 			const struct net_device *dev)
35 {
36 	static_branch_dec(&libeth_xdpsq_share);
37 
38 	if (!static_key_enabled(&libeth_xdpsq_share) && net_ratelimit())
39 		netdev_notice(dev, "XDPSQ sharing disabled\n");
40 
41 	lock->share = false;
42 }
43 EXPORT_SYMBOL_GPL(__libeth_xdpsq_put);
44 
45 void __acquires(&lock->lock)
46 __libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
47 {
48 	spin_lock(&lock->lock);
49 }
50 EXPORT_SYMBOL_GPL(__libeth_xdpsq_lock);
51 
52 void __releases(&lock->lock)
53 __libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
54 {
55 	spin_unlock(&lock->lock);
56 }
57 EXPORT_SYMBOL_GPL(__libeth_xdpsq_unlock);
58 
59 /* XDPSQ clean-up timers */
60 
61 /**
62  * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer
63  * @timer: timer to initialize
64  * @xdpsq: queue this timer belongs to
65  * @lock: corresponding XDPSQ lock
66  * @poll: queue polling/completion function
67  *
68  * XDPSQ clean-up timers must be set up before using at the queue configuration
69  * time. Set the required pointers and the cleaning callback.
70  */
71 void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
72 			     struct libeth_xdpsq_lock *lock,
73 			     void (*poll)(struct work_struct *work))
74 {
75 	timer->xdpsq = xdpsq;
76 	timer->lock = lock;
77 
78 	INIT_DELAYED_WORK(&timer->dwork, poll);
79 }
80 EXPORT_SYMBOL_GPL(libeth_xdpsq_init_timer);
81 
82 /* ``XDP_TX`` bulking */
83 
84 static void __cold
85 libeth_xdp_tx_return_one(const struct libeth_xdp_tx_frame *frm)
86 {
87 	if (frm->len_fl & LIBETH_XDP_TX_MULTI)
88 		libeth_xdp_return_frags(frm->data + frm->soff, true);
89 
90 	libeth_xdp_return_va(frm->data, true);
91 }
92 
93 static void __cold
94 libeth_xdp_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, u32 count)
95 {
96 	for (u32 i = 0; i < count; i++) {
97 		const struct libeth_xdp_tx_frame *frm = &bq[i];
98 
99 		if (!(frm->len_fl & LIBETH_XDP_TX_FIRST))
100 			continue;
101 
102 		libeth_xdp_tx_return_one(frm);
103 	}
104 }
105 
106 static void __cold libeth_trace_xdp_exception(const struct net_device *dev,
107 					      const struct bpf_prog *prog,
108 					      u32 act)
109 {
110 	trace_xdp_exception(dev, prog, act);
111 }
112 
113 /**
114  * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames
115  * @bq: XDP Tx frame bulk
116  * @sent: number of frames sent successfully (from this bulk)
117  * @flags: internal libeth_xdp flags (.ndo_xdp_xmit etc.)
118  *
119  * Cold helper used by __libeth_xdp_tx_flush_bulk(), do not call directly.
120  * Reports XDP Tx exceptions, frees the frames that won't be sent or adjust
121  * the Tx bulk to try again later.
122  */
123 void __cold libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
124 				    u32 flags)
125 {
126 	const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent];
127 	u32 left = bq->count - sent;
128 
129 	if (!(flags & LIBETH_XDP_TX_NDO))
130 		libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX);
131 
132 	if (!(flags & LIBETH_XDP_TX_DROP)) {
133 		memmove(bq->bulk, pos, left * sizeof(*bq->bulk));
134 		bq->count = left;
135 
136 		return;
137 	}
138 
139 	if (!(flags & LIBETH_XDP_TX_NDO))
140 		libeth_xdp_tx_return_bulk(pos, left);
141 	else
142 		libeth_xdp_xmit_return_bulk(pos, left, bq->dev);
143 
144 	bq->count = 0;
145 }
146 EXPORT_SYMBOL_GPL(libeth_xdp_tx_exception);
147 
148 /* .ndo_xdp_xmit() implementation */
149 
150 u32 __cold libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
151 				       u32 count, const struct net_device *dev)
152 {
153 	u32 n = 0;
154 
155 	for (u32 i = 0; i < count; i++) {
156 		const struct libeth_xdp_tx_frame *frm = &bq[i];
157 		dma_addr_t dma;
158 
159 		if (frm->flags & LIBETH_XDP_TX_FIRST)
160 			dma = *libeth_xdp_xmit_frame_dma(frm->xdpf);
161 		else
162 			dma = dma_unmap_addr(frm, dma);
163 
164 		dma_unmap_page(dev->dev.parent, dma, dma_unmap_len(frm, len),
165 			       DMA_TO_DEVICE);
166 
167 		/* Actual xdp_frames are freed by the core */
168 		n += !!(frm->flags & LIBETH_XDP_TX_FIRST);
169 	}
170 
171 	return n;
172 }
173 EXPORT_SYMBOL_GPL(libeth_xdp_xmit_return_bulk);
174 
175 /* Rx polling path */
176 
177 /**
178  * libeth_xdp_return_buff_slow - free &libeth_xdp_buff
179  * @xdp: buffer to free/return
180  *
181  * Slowpath version of libeth_xdp_return_buff() to be called on exceptions,
182  * queue clean-ups etc., without unwanted inlining.
183  */
184 void __cold libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp)
185 {
186 	__libeth_xdp_return_buff(xdp, false);
187 }
188 EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_slow);
189 
190 /* Tx buffer completion */
191 
192 static void libeth_xdp_put_netmem_bulk(netmem_ref netmem,
193 				       struct xdp_frame_bulk *bq)
194 {
195 	if (unlikely(bq->count == XDP_BULK_QUEUE_SIZE))
196 		xdp_flush_frame_bulk(bq);
197 
198 	bq->q[bq->count++] = netmem;
199 }
200 
201 /**
202  * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
203  * @sinfo: shared info corresponding to the buffer
204  * @bq: XDP frame bulk to store the buffer
205  * @frags: whether the buffer has frags
206  *
207  * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff, speeds up Tx
208  * completion of ``XDP_TX`` buffers and allows to free them in same bulks
209  * with &xdp_frame buffers.
210  */
211 void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
212 				 struct xdp_frame_bulk *bq, bool frags)
213 {
214 	if (!frags)
215 		goto head;
216 
217 	for (u32 i = 0; i < sinfo->nr_frags; i++)
218 		libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]),
219 					   bq);
220 
221 head:
222 	libeth_xdp_put_netmem_bulk(virt_to_netmem(sinfo), bq);
223 }
224 EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_bulk);
225 
226 /* Module */
227 
228 static const struct libeth_xdp_ops xdp_ops __initconst = {
229 	.bulk	= libeth_xdp_return_buff_bulk,
230 };
231 
232 static int __init libeth_xdp_module_init(void)
233 {
234 	libeth_attach_xdp(&xdp_ops);
235 
236 	return 0;
237 }
238 module_init(libeth_xdp_module_init);
239 
240 static void __exit libeth_xdp_module_exit(void)
241 {
242 	libeth_detach_xdp();
243 }
244 module_exit(libeth_xdp_module_exit);
245 
246 MODULE_DESCRIPTION("Common Ethernet library - XDP infra");
247 MODULE_IMPORT_NS("LIBETH");
248 MODULE_LICENSE("GPL");
249