xref: /linux/drivers/net/ethernet/intel/libeth/xsk.c (revision 57885276cc16a2e2b76282c808a4e84cbecb3aae)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2025 Intel Corporation */
3 
4 #define DEFAULT_SYMBOL_NAMESPACE	"LIBETH_XDP"
5 
6 #include <linux/export.h>
7 
8 #include <net/libeth/xsk.h>
9 
10 #include "priv.h"
11 
12 /* ``XDP_TX`` bulking */
13 
14 void __cold libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq,
15 				      u32 count)
16 {
17 	for (u32 i = 0; i < count; i++)
18 		libeth_xsk_buff_free_slow(bq[i].xsk);
19 }
20 
21 /* XSk TMO */
22 
/* Out-of-line XSk Tx metadata ops table; only the checksum request
 * callback is implemented.
 */
const struct xsk_tx_metadata_ops libeth_xsktmo_slow = {
	.tmo_request_checksum		= libeth_xsktmo_req_csum,
};
26 
27 /* Rx polling path */
28 
/**
 * libeth_xsk_buff_free_slow - free an XSk Rx buffer
 * @xdp: buffer to free
 *
 * Slowpath version of xsk_buff_free() to be used on exceptions, cleanups etc.
 * to avoid unwanted inlining. Also used by the other libeth_xsk error paths
 * (frag attach failures, prog exceptions, Tx bulk returns).
 */
void libeth_xsk_buff_free_slow(struct libeth_xdp_buff *xdp)
{
	xsk_buff_free(&xdp->base);
}
EXPORT_SYMBOL_GPL(libeth_xsk_buff_free_slow);
41 
42 /**
43  * libeth_xsk_buff_add_frag - add frag to XSk Rx buffer
44  * @head: head buffer
45  * @xdp: frag buffer
46  *
47  * External helper used by libeth_xsk_process_buff(), do not call directly.
48  * Frees both main and frag buffers on error.
49  *
50  * Return: main buffer with attached frag on success, %NULL on error (no space
51  * for a new frag).
52  */
53 struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head,
54 						 struct libeth_xdp_buff *xdp)
55 {
56 	if (!xsk_buff_add_frag(&head->base, &xdp->base))
57 		goto free;
58 
59 	return head;
60 
61 free:
62 	libeth_xsk_buff_free_slow(xdp);
63 	libeth_xsk_buff_free_slow(head);
64 
65 	return NULL;
66 }
67 EXPORT_SYMBOL_GPL(libeth_xsk_buff_add_frag);
68 
/**
 * libeth_xsk_buff_stats_frags - update onstack RQ stats with XSk frags info
 * @rs: onstack stats to update
 * @xdp: buffer to account
 *
 * External helper used by __libeth_xsk_run_pass(), do not call directly.
 * Adds buffer's frags count and total len to the onstack stats.
 * Thin out-of-line wrapper around the generic XDP frags accounting helper.
 */
void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs,
				 const struct libeth_xdp_buff *xdp)
{
	libeth_xdp_buff_stats_frags(rs, xdp);
}
EXPORT_SYMBOL_GPL(libeth_xsk_buff_stats_frags);
83 
84 /**
85  * __libeth_xsk_run_prog_slow - process the non-``XDP_REDIRECT`` verdicts
86  * @xdp: buffer to process
87  * @bq: Tx bulk for queueing on ``XDP_TX``
88  * @act: verdict to process
89  * @ret: error code if ``XDP_REDIRECT`` failed
90  *
91  * External helper used by __libeth_xsk_run_prog(), do not call directly.
92  * ``XDP_REDIRECT`` is the most common and hottest verdict on XSk, thus
93  * it is processed inline. The rest goes here for out-of-line processing,
94  * together with redirect errors.
95  *
96  * Return: libeth_xdp XDP prog verdict.
97  */
98 u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp,
99 			       const struct libeth_xdp_tx_bulk *bq,
100 			       enum xdp_action act, int ret)
101 {
102 	switch (act) {
103 	case XDP_DROP:
104 		xsk_buff_free(&xdp->base);
105 
106 		return LIBETH_XDP_DROP;
107 	case XDP_TX:
108 		return LIBETH_XDP_TX;
109 	case XDP_PASS:
110 		return LIBETH_XDP_PASS;
111 	default:
112 		break;
113 	}
114 
115 	return libeth_xdp_prog_exception(bq, xdp, act, ret);
116 }
117 EXPORT_SYMBOL_GPL(__libeth_xsk_run_prog_slow);
118 
119 /**
120  * libeth_xsk_prog_exception - handle XDP prog exceptions on XSk
121  * @xdp: buffer to process
122  * @act: verdict returned by the prog
123  * @ret: error code if ``XDP_REDIRECT`` failed
124  *
125  * Internal. Frees the buffer and, if the queue uses XSk wakeups, stop the
126  * current NAPI poll when there are no free buffers left.
127  *
128  * Return: libeth_xdp's XDP prog verdict.
129  */
130 u32 __cold libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp,
131 				     enum xdp_action act, int ret)
132 {
133 	const struct xdp_buff_xsk *xsk;
134 	u32 __ret = LIBETH_XDP_DROP;
135 
136 	if (act != XDP_REDIRECT)
137 		goto drop;
138 
139 	xsk = container_of(&xdp->base, typeof(*xsk), xdp);
140 	if (xsk_uses_need_wakeup(xsk->pool) && ret == -ENOBUFS)
141 		__ret = LIBETH_XDP_ABORTED;
142 
143 drop:
144 	libeth_xsk_buff_free_slow(xdp);
145 
146 	return __ret;
147 }
148 
149 /* Refill */
150 
151 /**
152  * libeth_xskfq_create - create an XSkFQ
153  * @fq: fill queue to initialize
154  *
155  * Allocates the FQEs and initializes the fields used by libeth_xdp: number
156  * of buffers to refill, refill threshold and buffer len.
157  *
158  * Return: %0 on success, -errno otherwise.
159  */
160 int libeth_xskfq_create(struct libeth_xskfq *fq)
161 {
162 	fq->fqes = kvcalloc_node(fq->count, sizeof(*fq->fqes), GFP_KERNEL,
163 				 fq->nid);
164 	if (!fq->fqes)
165 		return -ENOMEM;
166 
167 	fq->pending = fq->count;
168 	fq->thresh = libeth_xdp_queue_threshold(fq->count);
169 	fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool);
170 	fq->truesize = xsk_pool_get_rx_frag_step(fq->pool);
171 
172 	return 0;
173 }
174 EXPORT_SYMBOL_GPL(libeth_xskfq_create);
175 
176 /**
177  * libeth_xskfq_destroy - destroy an XSkFQ
178  * @fq: fill queue to destroy
179  *
180  * Zeroes the used fields and frees the FQEs array.
181  */
182 void libeth_xskfq_destroy(struct libeth_xskfq *fq)
183 {
184 	fq->buf_len = 0;
185 	fq->thresh = 0;
186 	fq->pending = 0;
187 
188 	kvfree(fq->fqes);
189 }
190 EXPORT_SYMBOL_GPL(libeth_xskfq_destroy);
191 
192 /* .ndo_xsk_wakeup */
193 
/* IPI callback: @info is the &napi_struct stashed via INIT_CSD() in
 * libeth_xsk_init_wakeup(). Runs with IRQs off, hence the _irqoff variant.
 */
static void libeth_xsk_napi_sched(void *info)
{
	__napi_schedule_irqoff(info);
}
198 
/**
 * libeth_xsk_init_wakeup - initialize libeth XSk wakeup structure
 * @csd: struct to initialize
 * @napi: NAPI corresponding to this queue
 *
 * libeth_xdp uses inter-processor interrupts to perform XSk wakeups. In order
 * to do that, the corresponding CSDs must be initialized when creating the
 * queues. The NAPI pointer is stored in @csd->info, where both the IPI
 * callback and libeth_xsk_wakeup() retrieve it later.
 */
void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi)
{
	INIT_CSD(csd, libeth_xsk_napi_sched, napi);
}
EXPORT_SYMBOL_GPL(libeth_xsk_init_wakeup);
213 
214 /**
215  * libeth_xsk_wakeup - perform an XSk wakeup
216  * @csd: CSD corresponding to the queue
217  * @qid: the stack queue index
218  *
219  * Try to mark the NAPI as missed first, so that it could be rescheduled.
220  * If it's not, schedule it on the corresponding CPU using IPIs (or directly
221  * if already running on it).
222  */
223 void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid)
224 {
225 	struct napi_struct *napi = csd->info;
226 
227 	if (napi_if_scheduled_mark_missed(napi) ||
228 	    unlikely(!napi_schedule_prep(napi)))
229 		return;
230 
231 	if (unlikely(qid >= nr_cpu_ids))
232 		qid %= nr_cpu_ids;
233 
234 	if (qid != raw_smp_processor_id() && cpu_online(qid))
235 		smp_call_function_single_async(qid, csd);
236 	else
237 		__napi_schedule(napi);
238 }
239 EXPORT_SYMBOL_GPL(libeth_xsk_wakeup);
240 
241 /* Pool setup */
242 
/* DMA mapping attributes for XSk pools: weak ordering allowed, no implicit
 * CPU syncs on map/unmap.
 */
#define LIBETH_XSK_DMA_ATTR					\
	(DMA_ATTR_WEAK_ORDERING | DMA_ATTR_SKIP_CPU_SYNC)
245 
246 /**
247  * libeth_xsk_setup_pool - setup or destroy an XSk pool for a queue
248  * @dev: target &net_device
249  * @qid: stack queue index to configure
250  * @enable: whether to enable or disable the pool
251  *
252  * Check that @qid is valid and then map or unmap the pool.
253  *
254  * Return: %0 on success, -errno otherwise.
255  */
256 int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable)
257 {
258 	struct xsk_buff_pool *pool;
259 
260 	pool = xsk_get_pool_from_qid(dev, qid);
261 	if (!pool)
262 		return -EINVAL;
263 
264 	if (enable)
265 		return xsk_pool_dma_map(pool, dev->dev.parent,
266 					LIBETH_XSK_DMA_ATTR);
267 	else
268 		xsk_pool_dma_unmap(pool, LIBETH_XSK_DMA_ATTR);
269 
270 	return 0;
271 }
272 EXPORT_SYMBOL_GPL(libeth_xsk_setup_pool);
273