xref: /linux/drivers/net/ethernet/intel/libeth/xsk.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#define DEFAULT_SYMBOL_NAMESPACE	"LIBETH_XDP"

#include <linux/export.h>

#include <net/libeth/xsk.h>

#include "priv.h"

/* ``XDP_TX`` bulking */

/* Slowpath helper: return the XSk buffers of @count unsent ``XDP_TX`` frames
 * from @bq back to their pool.
 */
void __cold libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq,
				      u32 count)
{
	for (u32 i = 0; i < count; i++)
		libeth_xsk_buff_free_slow(bq[i].xsk);
}
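
/*
 * Illustrative sketch (not part of this file): a driver aborting the unsent
 * tail of an ``XDP_TX`` bulk, e.g. when the XDPSQ has no room left. The
 * &libeth_xdp_tx_bulk ->bulk / ->count usage and @sent are assumptions made
 * for the example.
 *
 *	static void example_xsk_abort_bulk(struct libeth_xdp_tx_bulk *bq,
 *					   u32 sent)
 *	{
 *		// Give the queued-but-unsent frames back to the XSk pool
 *		libeth_xsk_tx_return_bulk(&bq->bulk[sent], bq->count - sent);
 *		bq->count = 0;
 *	}
 */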

/* XSk TMO */

const struct xsk_tx_metadata_ops libeth_xsktmo_slow = {
	.tmo_request_checksum		= libeth_xsktmo_req_csum,
};

/* Rx polling path */

/**
 * libeth_xsk_buff_free_slow - free an XSk Rx buffer
 * @xdp: buffer to free
 *
 * Slowpath version of xsk_buff_free() to be used on exceptions, cleanups etc.
 * to avoid unwanted inlining.
 */
void libeth_xsk_buff_free_slow(struct libeth_xdp_buff *xdp)
{
	xsk_buff_free(&xdp->base);
}
EXPORT_SYMBOL_GPL(libeth_xsk_buff_free_slow);
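
/*
 * Illustrative sketch (not part of this file): queue teardown is a typical
 * caller, e.g. releasing a partially-built multi-buffer frame kept across
 * NAPI polls. The example_rxq structure and its ->xsk field are
 * hypothetical.
 *
 *	static void example_rxq_clean(struct example_rxq *rxq)
 *	{
 *		if (rxq->xsk)
 *			libeth_xsk_buff_free_slow(rxq->xsk);
 *		rxq->xsk = NULL;
 *	}
 */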

/**
 * libeth_xsk_buff_add_frag - add frag to XSk Rx buffer
 * @head: head buffer
 * @xdp: frag buffer
 *
 * External helper used by libeth_xsk_process_buff(), do not call directly.
 * Frees both main and frag buffers on error.
 *
 * Return: main buffer with attached frag on success, %NULL on error (no space
 * for a new frag).
 */
struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head,
						 struct libeth_xdp_buff *xdp)
{
	if (!xsk_buff_add_frag(&head->base, &xdp->base))
		goto free;

	return head;

free:
	libeth_xsk_buff_free_slow(xdp);
	libeth_xsk_buff_free_slow(head);

	return NULL;
}
EXPORT_SYMBOL_GPL(libeth_xsk_buff_add_frag);
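
/*
 * Illustrative sketch (not part of this file): the kind of logic
 * libeth_xsk_process_buff() wraps around this helper when a descriptor
 * continues the previous frame; xsk_buff_set_size() is the regular XSk
 * driver API, @len would come from the Rx descriptor.
 *
 *	xsk_buff_set_size(&frag->base, len);
 *	head = libeth_xsk_buff_add_frag(head, frag);
 *	if (!head)
 *		return NULL;	// both buffers have already been freed
 */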

/**
 * libeth_xsk_buff_stats_frags - update onstack RQ stats with XSk frags info
 * @rs: onstack stats to update
 * @xdp: buffer to account
 *
 * External helper used by __libeth_xsk_run_pass(), do not call directly.
 * Adds the buffer's frag count and total len to the onstack stats.
 */
void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs,
				 const struct libeth_xdp_buff *xdp)
{
	libeth_xdp_buff_stats_frags(rs, xdp);
}
EXPORT_SYMBOL_GPL(libeth_xsk_buff_stats_frags);
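
/*
 * Illustrative sketch (not part of this file): after ``XDP_PASS``, the Rx
 * pass accounts the frame roughly as below. The &libeth_rq_napi_stats field
 * names are assumptions; xdp_buff_has_frags() and xdp_get_buff_len() are the
 * generic XDP helpers.
 *
 *	rs->packets++;
 *	rs->bytes += xdp_get_buff_len(&xdp->base);
 *	if (xdp_buff_has_frags(&xdp->base))
 *		libeth_xsk_buff_stats_frags(rs, xdp);
 */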

/**
 * __libeth_xsk_run_prog_slow - process the non-``XDP_REDIRECT`` verdicts
 * @xdp: buffer to process
 * @bq: Tx bulk for queueing on ``XDP_TX``
 * @act: verdict to process
 * @ret: error code if ``XDP_REDIRECT`` failed
 *
 * External helper used by __libeth_xsk_run_prog(), do not call directly.
 * ``XDP_REDIRECT`` is the most common and hottest verdict on XSk, thus
 * it is processed inline. The rest goes here for out-of-line processing,
 * together with redirect errors.
 *
 * Return: libeth_xdp XDP prog verdict.
 */
u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp,
			       const struct libeth_xdp_tx_bulk *bq,
			       enum xdp_action act, int ret)
{
	switch (act) {
	case XDP_DROP:
		xsk_buff_free(&xdp->base);

		return LIBETH_XDP_DROP;
	case XDP_TX:
		return LIBETH_XDP_TX;
	case XDP_PASS:
		return LIBETH_XDP_PASS;
	default:
		break;
	}

	return libeth_xdp_prog_exception(bq, xdp, act, ret);
}
EXPORT_SYMBOL_GPL(__libeth_xsk_run_prog_slow);
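
/*
 * Illustrative sketch (not part of this file): a hypothetical simplification
 * of the inline fastpath backed by this helper, where only ``XDP_REDIRECT``
 * is handled before falling back here. The ->prog / ->dev bulk fields and
 * %LIBETH_XDP_REDIRECT are assumptions.
 *
 *	act = bpf_prog_run_xdp(bq->prog, &xdp->base);
 *	if (act == XDP_REDIRECT) {
 *		ret = xdp_do_redirect(bq->dev, &xdp->base, bq->prog);
 *		if (likely(!ret))
 *			return LIBETH_XDP_REDIRECT;
 *	}
 *
 *	return __libeth_xsk_run_prog_slow(xdp, bq, act, ret);
 */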

/**
 * libeth_xsk_prog_exception - handle XDP prog exceptions on XSk
 * @xdp: buffer to process
 * @act: verdict returned by the prog
 * @ret: error code if ``XDP_REDIRECT`` failed
 *
 * Internal. Frees the buffer and, if the queue uses XSk wakeups, stops the
 * current NAPI poll when there are no free buffers left.
 *
 * Return: libeth_xdp's XDP prog verdict.
 */
u32 __cold libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp,
				     enum xdp_action act, int ret)
{
	const struct xdp_buff_xsk *xsk;
	u32 __ret = LIBETH_XDP_DROP;

	if (act != XDP_REDIRECT)
		goto drop;

	xsk = container_of(&xdp->base, typeof(*xsk), xdp);
	if (xsk_uses_need_wakeup(xsk->pool) && ret == -ENOBUFS)
		__ret = LIBETH_XDP_ABORTED;

drop:
	libeth_xsk_buff_free_slow(xdp);

	return __ret;
}
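
/*
 * Illustrative sketch (not part of this file): a poll loop reacting to the
 * verdict above. On %LIBETH_XDP_ABORTED the FQ is exhausted, so the driver
 * stops early and relies on the XSk wakeup to reschedule the NAPI; all names
 * except the verdict are hypothetical.
 *
 *	ret = example_xsk_rx_one(rxq, desc, bq);
 *	if (unlikely(ret == LIBETH_XDP_ABORTED))
 *		break;		// resume on the next wakeup
 */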

/* Refill */

/**
 * libeth_xskfq_create - create an XSkFQ
 * @fq: fill queue to initialize
 *
 * Allocates the FQEs and initializes the fields used by libeth_xdp: number
 * of buffers to refill, refill threshold and buffer len.
 *
 * Return: %0 on success, -errno otherwise.
 */
int libeth_xskfq_create(struct libeth_xskfq *fq)
{
	fq->fqes = kvcalloc_node(fq->count, sizeof(*fq->fqes), GFP_KERNEL,
				 fq->nid);
	if (!fq->fqes)
		return -ENOMEM;

	fq->pending = fq->count;
	fq->thresh = libeth_xdp_queue_threshold(fq->count);
	fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool);

	return 0;
}
EXPORT_SYMBOL_GPL(libeth_xskfq_create);
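
/*
 * Illustrative sketch (not part of this file): the caller is expected to
 * fill in the pool, the FQE count, and the NUMA node first; these are the
 * exact fields consumed above. The @rxq wrapper is hypothetical.
 *
 *	struct libeth_xskfq *fq = &rxq->xskfq;
 *	int err;
 *
 *	fq->pool = pool;
 *	fq->count = rxq->desc_count;
 *	fq->nid = NUMA_NO_NODE;		// or the device's home node
 *
 *	err = libeth_xskfq_create(fq);
 *	if (err)
 *		return err;
 */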

/**
 * libeth_xskfq_destroy - destroy an XSkFQ
 * @fq: fill queue to destroy
 *
 * Zeroes the used fields and frees the FQEs array.
 */
void libeth_xskfq_destroy(struct libeth_xskfq *fq)
{
	fq->buf_len = 0;
	fq->thresh = 0;
	fq->pending = 0;

	kvfree(fq->fqes);
}
EXPORT_SYMBOL_GPL(libeth_xskfq_destroy);

/* .ndo_xsk_wakeup */

/* IPI callback: schedules the queue's NAPI on the target CPU */
static void libeth_xsk_napi_sched(void *info)
{
	__napi_schedule_irqoff(info);
}

/**
 * libeth_xsk_init_wakeup - initialize libeth XSk wakeup structure
 * @csd: struct to initialize
 * @napi: NAPI corresponding to this queue
 *
 * libeth_xdp uses inter-processor interrupts to perform XSk wakeups. In order
 * to do that, the corresponding CSDs must be initialized when creating the
 * queues.
 */
void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi)
{
	INIT_CSD(csd, libeth_xsk_napi_sched, napi);
}
EXPORT_SYMBOL_GPL(libeth_xsk_init_wakeup);
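
/*
 * Illustrative sketch (not part of this file): CSD initialization at queue
 * vector setup time, pairing each vector's CSD with its NAPI instance; @qv
 * is a hypothetical queue-vector structure.
 *
 *	libeth_xsk_init_wakeup(&qv->csd, &qv->napi);
 */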

/**
 * libeth_xsk_wakeup - perform an XSk wakeup
 * @csd: CSD corresponding to the queue
 * @qid: the stack queue index
 *
 * Try to mark the NAPI as missed first, so that it gets rescheduled.
 * If it's not scheduled, schedule it on the corresponding CPU using an IPI
 * (or directly if already running on that CPU).
 */
void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid)
{
	struct napi_struct *napi = csd->info;

	if (napi_if_scheduled_mark_missed(napi) ||
	    unlikely(!napi_schedule_prep(napi)))
		return;

	if (unlikely(qid >= nr_cpu_ids))
		qid %= nr_cpu_ids;

	if (qid != raw_smp_processor_id() && cpu_online(qid))
		smp_call_function_single_async(qid, csd);
	else
		__napi_schedule(napi);
}
EXPORT_SYMBOL_GPL(libeth_xsk_wakeup);
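
/*
 * Illustrative sketch (not part of this file): a minimal .ndo_xsk_wakeup
 * implementation delegating to the helper above; the queue-vector lookup is
 * hypothetical, the callback signature is the standard one.
 *
 *	static int example_xsk_wakeup(struct net_device *dev, u32 qid,
 *				      u32 flags)
 *	{
 *		struct example_q_vector *qv = example_find_qv(dev, qid);
 *
 *		libeth_xsk_wakeup(&qv->csd, qid);
 *
 *		return 0;
 *	}
 */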

/* Pool setup */

#define LIBETH_XSK_DMA_ATTR					\
	(DMA_ATTR_WEAK_ORDERING | DMA_ATTR_SKIP_CPU_SYNC)

/**
 * libeth_xsk_setup_pool - setup or destroy an XSk pool for a queue
 * @dev: target &net_device
 * @qid: stack queue index to configure
 * @enable: whether to enable or disable the pool
 *
 * Check that @qid is valid and then map or unmap the pool.
 *
 * Return: %0 on success, -errno otherwise.
 */
int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable)
{
	struct xsk_buff_pool *pool;

	pool = xsk_get_pool_from_qid(dev, qid);
	if (!pool)
		return -EINVAL;

	if (enable)
		return xsk_pool_dma_map(pool, dev->dev.parent,
					LIBETH_XSK_DMA_ATTR);
	else
		xsk_pool_dma_unmap(pool, LIBETH_XSK_DMA_ATTR);

	return 0;
}
EXPORT_SYMBOL_GPL(libeth_xsk_setup_pool);
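
/*
 * Illustrative sketch (not part of this file): wiring the helper above into
 * .ndo_bpf for %XDP_SETUP_XSK_POOL. &struct netdev_bpf carries the pool
 * (%NULL on disable) and the queue ID; the queue stop/restart a real driver
 * performs around this is elided.
 *
 *	static int example_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_XSK_POOL:
 *			return libeth_xsk_setup_pool(dev, bpf->xsk.queue_id,
 *						     !!bpf->xsk.pool);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */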
272