// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP"

#include <linux/export.h>

#include <net/libeth/xsk.h>

#include "priv.h"

/* ``XDP_TX`` bulking */

void __cold libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq,
				      u32 count)
{
	for (u32 i = 0; i < count; i++)
		libeth_xsk_buff_free_slow(bq[i].xsk);
}
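
/*
 * Usage sketch (hypothetical driver code, not part of libeth): when only
 * part of an ``XDP_TX`` bulk could be queued to the HW ring, the leftover
 * XSk frames must be handed back. The ``bulk`` and ``count`` field names
 * below are assumptions about the driver-side bulk structure:
 *
 *	if (unlikely(sent < bq->count))
 *		libeth_xsk_tx_return_bulk(&bq->bulk[sent],
 *					  bq->count - sent);
 */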

/* XSk TMO */

const struct xsk_tx_metadata_ops libeth_xsktmo_slow = {
	.tmo_request_checksum	= libeth_xsktmo_req_csum,
};
2640e846d1SAlexander Lobakin
27b3ad8450SAlexander Lobakin /* Rx polling path */
28b3ad8450SAlexander Lobakin
29b3ad8450SAlexander Lobakin /**
30b3ad8450SAlexander Lobakin * libeth_xsk_buff_free_slow - free an XSk Rx buffer
31b3ad8450SAlexander Lobakin * @xdp: buffer to free
32b3ad8450SAlexander Lobakin *
33b3ad8450SAlexander Lobakin * Slowpath version of xsk_buff_free() to be used on exceptions, cleanups etc.
34b3ad8450SAlexander Lobakin * to avoid unwanted inlining.
35b3ad8450SAlexander Lobakin */
libeth_xsk_buff_free_slow(struct libeth_xdp_buff * xdp)36b3ad8450SAlexander Lobakin void libeth_xsk_buff_free_slow(struct libeth_xdp_buff *xdp)
37b3ad8450SAlexander Lobakin {
38b3ad8450SAlexander Lobakin xsk_buff_free(&xdp->base);
39b3ad8450SAlexander Lobakin }
40b3ad8450SAlexander Lobakin EXPORT_SYMBOL_GPL(libeth_xsk_buff_free_slow);
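
/*
 * Typical (hypothetical) coldpath use: releasing the still-unprocessed Rx
 * buffers when a queue is torn down. The ring layout and field names are
 * assumed driver internals, not libeth API:
 *
 *	for (u32 i = rxq->next_to_clean; i != rxq->next_to_use;
 *	     i = (i + 1) % rxq->count)
 *		libeth_xsk_buff_free_slow(rxq->fqes[i]);
 */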

/**
 * libeth_xsk_buff_add_frag - add frag to XSk Rx buffer
 * @head: head buffer
 * @xdp: frag buffer
 *
 * External helper used by libeth_xsk_process_buff(), do not call directly.
 * Frees both main and frag buffers on error.
 *
 * Return: main buffer with attached frag on success, %NULL on error (no space
 * for a new frag).
 */
struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head,
						 struct libeth_xdp_buff *xdp)
{
	if (!xsk_buff_add_frag(&head->base, &xdp->base))
		goto free;

	return head;

free:
	libeth_xsk_buff_free_slow(xdp);
	libeth_xsk_buff_free_slow(head);

	return NULL;
}
EXPORT_SYMBOL_GPL(libeth_xsk_buff_add_frag);
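
/*
 * For orientation, the calling pattern inside libeth_xsk_process_buff()
 * has roughly the following shape (a simplified sketch, not the real body):
 *
 *	if (!head)
 *		return xdp;	// first descriptor becomes the head
 *
 *	return libeth_xsk_buff_add_frag(head, xdp);
 */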

/**
 * libeth_xsk_buff_stats_frags - update onstack RQ stats with XSk frags info
 * @rs: onstack stats to update
 * @xdp: buffer to account
 *
 * External helper used by __libeth_xsk_run_pass(), do not call directly.
 * Adds buffer's frags count and total len to the onstack stats.
 */
void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs,
				 const struct libeth_xdp_buff *xdp)
{
	libeth_xdp_buff_stats_frags(rs, xdp);
}
EXPORT_SYMBOL_GPL(libeth_xsk_buff_stats_frags);

/**
 * __libeth_xsk_run_prog_slow - process the non-``XDP_REDIRECT`` verdicts
 * @xdp: buffer to process
 * @bq: Tx bulk for queueing on ``XDP_TX``
 * @act: verdict to process
 * @ret: error code if ``XDP_REDIRECT`` failed
 *
 * External helper used by __libeth_xsk_run_prog(), do not call directly.
 * ``XDP_REDIRECT`` is the most common and hottest verdict on XSk, thus
 * it is processed inline. The rest goes here for out-of-line processing,
 * together with redirect errors.
 *
 * Return: libeth_xdp's XDP prog verdict.
 */
u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp,
			       const struct libeth_xdp_tx_bulk *bq,
			       enum xdp_action act, int ret)
{
	switch (act) {
	case XDP_DROP:
		xsk_buff_free(&xdp->base);

		return LIBETH_XDP_DROP;
	case XDP_TX:
		return LIBETH_XDP_TX;
	case XDP_PASS:
		return LIBETH_XDP_PASS;
	default:
		break;
	}

	return libeth_xdp_prog_exception(bq, xdp, act, ret);
}
EXPORT_SYMBOL_GPL(__libeth_xsk_run_prog_slow);
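
/*
 * For reference, the inline fast path dispatching here looks roughly like
 * the sketch below. bpf_prog_run_xdp() and xdp_do_redirect() are core
 * kernel helpers; the verdict name and surrounding code are assumptions:
 *
 *	act = bpf_prog_run_xdp(prog, &xdp->base);
 *	if (likely(act == XDP_REDIRECT)) {
 *		ret = xdp_do_redirect(dev, &xdp->base, prog);
 *		if (likely(!ret))
 *			return LIBETH_XDP_REDIRECT;
 *	} else {
 *		ret = 0;
 *	}
 *
 *	return __libeth_xsk_run_prog_slow(xdp, bq, act, ret);
 */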

/**
 * libeth_xsk_prog_exception - handle XDP prog exceptions on XSk
 * @xdp: buffer to process
 * @act: verdict returned by the prog
 * @ret: error code if ``XDP_REDIRECT`` failed
 *
 * Internal. Frees the buffer and, if the queue uses XSk wakeups, stops the
 * current NAPI poll when there are no free buffers left.
 *
 * Return: libeth_xdp's XDP prog verdict.
 */
u32 __cold libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp,
				     enum xdp_action act, int ret)
{
	const struct xdp_buff_xsk *xsk;
	u32 __ret = LIBETH_XDP_DROP;

	if (act != XDP_REDIRECT)
		goto drop;

	xsk = container_of(&xdp->base, typeof(*xsk), xdp);
	if (xsk_uses_need_wakeup(xsk->pool) && ret == -ENOBUFS)
		__ret = LIBETH_XDP_ABORTED;

drop:
	libeth_xsk_buff_free_slow(xdp);

	return __ret;
}

/* Refill */

/**
 * libeth_xskfq_create - create an XSkFQ
 * @fq: fill queue to initialize
 *
 * Allocates the FQEs and initializes the fields used by libeth_xdp: number
 * of buffers to refill, refill threshold and buffer len.
 *
 * Return: %0 on success, -errno otherwise.
 */
int libeth_xskfq_create(struct libeth_xskfq *fq)
{
	fq->fqes = kvcalloc_node(fq->count, sizeof(*fq->fqes), GFP_KERNEL,
				 fq->nid);
	if (!fq->fqes)
		return -ENOMEM;

	fq->pending = fq->count;
	fq->thresh = libeth_xdp_queue_threshold(fq->count);
	fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool);

	return 0;
}
EXPORT_SYMBOL_GPL(libeth_xskfq_create);
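
/*
 * Hypothetical driver-side setup; ``pool``, ``count`` and ``nid`` are the
 * fields this helper reads, the values are placeholders:
 *
 *	struct libeth_xskfq fq = {
 *		.pool	= xsk_get_pool_from_qid(dev, qid),
 *		.count	= rx_ring_size,
 *		.nid	= NUMA_NO_NODE,
 *	};
 *	int err = libeth_xskfq_create(&fq);
 *
 * A matching libeth_xskfq_destroy(&fq) is expected on teardown.
 */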

/**
 * libeth_xskfq_destroy - destroy an XSkFQ
 * @fq: fill queue to destroy
 *
 * Zeroes the used fields and frees the FQEs array.
 */
void libeth_xskfq_destroy(struct libeth_xskfq *fq)
{
	fq->buf_len = 0;
	fq->thresh = 0;
	fq->pending = 0;

	kvfree(fq->fqes);
}
EXPORT_SYMBOL_GPL(libeth_xskfq_destroy);

/* .ndo_xsk_wakeup */

static void libeth_xsk_napi_sched(void *info)
{
	__napi_schedule_irqoff(info);
}

/**
 * libeth_xsk_init_wakeup - initialize libeth XSk wakeup structure
 * @csd: struct to initialize
 * @napi: NAPI corresponding to this queue
 *
 * libeth_xdp uses inter-processor interrupts to perform XSk wakeups. In order
 * to do that, the corresponding CSDs must be initialized when creating the
 * queues.
 */
void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi)
{
	INIT_CSD(csd, libeth_xsk_napi_sched, napi);
}
EXPORT_SYMBOL_GPL(libeth_xsk_init_wakeup);
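
/*
 * Sketch of queue-creation time init; the queue structure and its field
 * names are hypothetical driver internals:
 *
 *	libeth_xsk_init_wakeup(&q->csd, &q->napi);
 */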

/**
 * libeth_xsk_wakeup - perform an XSk wakeup
 * @csd: CSD corresponding to the queue
 * @qid: the stack queue index
 *
 * Try to mark the NAPI as missed first, so that it will be rescheduled.
 * If it's not running, schedule it on the corresponding CPU using an IPI
 * (or directly if already on that CPU).
 */
void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid)
{
	struct napi_struct *napi = csd->info;

	if (napi_if_scheduled_mark_missed(napi) ||
	    unlikely(!napi_schedule_prep(napi)))
		return;

	if (unlikely(qid >= nr_cpu_ids))
		qid %= nr_cpu_ids;

	if (qid != raw_smp_processor_id() && cpu_online(qid))
		smp_call_function_single_async(qid, csd);
	else
		__napi_schedule(napi);
}
EXPORT_SYMBOL_GPL(libeth_xsk_wakeup);
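
/*
 * A minimal .ndo_xsk_wakeup built on top of this helper might look like
 * the sketch below; drv_get_queue() and struct drv_queue are hypothetical:
 *
 *	static int drv_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 *	{
 *		struct drv_queue *q = drv_get_queue(dev, qid);
 *
 *		libeth_xsk_wakeup(&q->csd, qid);
 *
 *		return 0;
 *	}
 */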

/* Pool setup */

#define LIBETH_XSK_DMA_ATTR \
	(DMA_ATTR_WEAK_ORDERING | DMA_ATTR_SKIP_CPU_SYNC)

/**
 * libeth_xsk_setup_pool - setup or destroy an XSk pool for a queue
 * @dev: target &net_device
 * @qid: stack queue index to configure
 * @enable: whether to enable or disable the pool
 *
 * Check that @qid is valid and then map or unmap the pool.
 *
 * Return: %0 on success, -errno otherwise.
 */
int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable)
{
	struct xsk_buff_pool *pool;

	pool = xsk_get_pool_from_qid(dev, qid);
	if (!pool)
		return -EINVAL;

	if (enable)
		return xsk_pool_dma_map(pool, dev->dev.parent,
					LIBETH_XSK_DMA_ATTR);

	xsk_pool_dma_unmap(pool, LIBETH_XSK_DMA_ATTR);

	return 0;
}
EXPORT_SYMBOL_GPL(libeth_xsk_setup_pool);
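
/*
 * Sketch of the corresponding .ndo_bpf branch; &netdev_bpf's .xsk members
 * are the core kernel interface, the surrounding switch is driver code:
 *
 *	case XDP_SETUP_XSK_POOL:
 *		return libeth_xsk_setup_pool(dev, bpf->xsk.queue_id,
 *					     !!bpf->xsk.pool);
 */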