// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include <net/libeth/xsk.h>

#include "idpf.h"
#include "xdp.h"
#include "xsk.h"

static void idpf_xsk_tx_timer(struct work_struct *work);

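/* Attach the XSk pool registered for this Rx queue's index, if any.
 * The queue is marked as XSk-enabled only when the pool is DMA-mapped
 * and able to provide at least one buffer.
 */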
static void idpf_xsk_setup_rxq(const struct idpf_vport *vport,
			       struct idpf_rx_queue *rxq)
{
	struct xsk_buff_pool *pool;

	pool = xsk_get_pool_from_qid(vport->netdev, rxq->idx);
	if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
		return;

	rxq->pool = pool;

	idpf_queue_set(XSK, rxq);
}

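/* Buffer queues aren't indexed by the pool's queue ID directly, so walk
 * the Rx queue groups to find the Rx queue this buffer queue serves and
 * look the pool up by that queue's index.
 */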
static void idpf_xsk_setup_bufq(const struct idpf_vport *vport,
				struct idpf_buf_queue *bufq)
{
	struct xsk_buff_pool *pool;
	u32 qid = U32_MAX;

	for (u32 i = 0; i < vport->num_rxq_grp; i++) {
		const struct idpf_rxq_group *grp = &vport->rxq_grps[i];

		for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
			if (&grp->splitq.bufq_sets[j].bufq == bufq) {
				qid = grp->splitq.rxq_sets[0]->rxq.idx;
				goto setup;
			}
		}
	}

setup:
	pool = xsk_get_pool_from_qid(vport->netdev, qid);
	if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
		return;

	bufq->pool = pool;

	idpf_queue_set(XSK, bufq);
}

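/* Attach an XSk pool to an XDP Tx queue. On success, also arm the libeth
 * completion timer and let the pool's need_wakeup mode decide whether the
 * queue runs in the NOIRQ mode.
 */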
static void idpf_xsk_setup_txq(const struct idpf_vport *vport,
			       struct idpf_tx_queue *txq)
{
	struct xsk_buff_pool *pool;
	u32 qid;

	idpf_queue_clear(XSK, txq);

	if (!idpf_queue_has(XDP, txq))
		return;

	qid = txq->idx - vport->xdp_txq_offset;

	pool = xsk_get_pool_from_qid(vport->netdev, qid);
	if (!pool || !pool->dev)
		return;

	txq->pool = pool;
	libeth_xdpsq_init_timer(txq->timer, txq, &txq->xdp_lock,
				idpf_xsk_tx_timer);

	idpf_queue_assign(NOIRQ, txq, xsk_uses_need_wakeup(pool));
	idpf_queue_set(XSK, txq);
}

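/* Mark a completion queue as XSk if a pool is installed for the queue ID
 * corresponding to the first Tx queue of its group.
 */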
static void idpf_xsk_setup_complq(const struct idpf_vport *vport,
				  struct idpf_compl_queue *complq)
{
	const struct xsk_buff_pool *pool;
	u32 qid;

	idpf_queue_clear(XSK, complq);

	if (!idpf_queue_has(XDP, complq))
		return;

	qid = complq->txq_grp->txqs[0]->idx - vport->xdp_txq_offset;

	pool = xsk_get_pool_from_qid(vport->netdev, qid);
	if (!pool || !pool->dev)
		return;

	idpf_queue_set(XSK, complq);
}

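/**
 * idpf_xsk_setup_queue - set up an XSk-capable queue before use
 * @vport: vport the queue belongs to
 * @q: queue to set up
 * @type: type of the queue
 *
 * Dispatch to the type-specific setup helper. No-op when XDP is not
 * enabled on the vport.
 */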
void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
			  enum virtchnl2_queue_type type)
{
	if (!idpf_xdp_enabled(vport))
		return;

	switch (type) {
	case VIRTCHNL2_QUEUE_TYPE_RX:
		idpf_xsk_setup_rxq(vport, q);
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		idpf_xsk_setup_bufq(vport, q);
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX:
		idpf_xsk_setup_txq(vport, q);
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
		idpf_xsk_setup_complq(vport, q);
		break;
	default:
		break;
	}
}

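/**
 * idpf_xsk_clear_queue - remove the XSk configuration from a queue
 * @q: queue to clean up
 * @type: type of the queue
 *
 * Clear the XSK flag, detach the pool, and for Tx queues, restore the
 * NOIRQ mode and the DMA mapping device overridden by the pool setup.
 */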
void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type)
{
	struct idpf_compl_queue *complq;
	struct idpf_buf_queue *bufq;
	struct idpf_rx_queue *rxq;
	struct idpf_tx_queue *txq;

	switch (type) {
	case VIRTCHNL2_QUEUE_TYPE_RX:
		rxq = q;
		if (!idpf_queue_has_clear(XSK, rxq))
			return;

		rxq->pool = NULL;
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		bufq = q;
		if (!idpf_queue_has_clear(XSK, bufq))
			return;

		bufq->pool = NULL;
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX:
		txq = q;
		if (!idpf_queue_has_clear(XSK, txq))
			return;

		idpf_queue_set(NOIRQ, txq);
		txq->dev = txq->netdev->dev.parent;
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
		complq = q;
		idpf_queue_clear(XSK, complq);
		break;
	default:
		break;
	}
}

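/**
 * idpf_xsk_init_wakeup - initialize the XSk wakeup data for a queue vector
 * @qv: queue vector to initialize
 */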
void idpf_xsk_init_wakeup(struct idpf_q_vector *qv)
{
	libeth_xsk_init_wakeup(&qv->csd, &qv->napi);
}

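/**
 * idpf_xsksq_clean - clean an XSk send queue during release
 * @xdpsq: queue to clean
 *
 * Free all the buffers still left on the ring: %XDP_TX / .ndo_xdp_xmit
 * frames go through the libeth completion path, XSk xmit frames are
 * credited back to the pool.
 */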
void idpf_xsksq_clean(struct idpf_tx_queue *xdpsq)
{
	struct libeth_xdpsq_napi_stats ss = { };
	u32 ntc = xdpsq->next_to_clean;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->pool->dev,
		.bq	= &bq,
		.xss	= &ss,
	};
	u32 xsk_frames = 0;

	xdp_frame_bulk_init(&bq);

	while (ntc != xdpsq->next_to_use) {
		struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];

		if (sqe->type)
			libeth_xdp_complete_tx(sqe, &cp);
		else
			xsk_frames++;

		if (unlikely(++ntc == xdpsq->desc_count))
			ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	if (xsk_frames)
		xsk_tx_completed(xdpsq->pool, xsk_frames);
}

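/* Completion slow path: the ring carries a mix of XSk xmit descriptors
 * and %XDP_TX / .ndo_xdp_xmit frames, so each SQE type must be checked
 * individually.
 */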
static noinline u32 idpf_xsksq_complete_slow(struct idpf_tx_queue *xdpsq,
					     u32 done)
{
	struct libeth_xdpsq_napi_stats ss = { };
	u32 ntc = xdpsq->next_to_clean;
	u32 cnt = xdpsq->desc_count;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->pool->dev,
		.bq	= &bq,
		.xss	= &ss,
		.napi	= true,
	};
	u32 xsk_frames = 0;

	xdp_frame_bulk_init(&bq);

	for (u32 i = 0; likely(i < done); i++) {
		struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];

		if (sqe->type)
			libeth_xdp_complete_tx(sqe, &cp);
		else
			xsk_frames++;

		if (unlikely(++ntc == cnt))
			ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	xdpsq->next_to_clean = ntc;
	xdpsq->xdp_tx -= cp.xdp_tx;

	return xsk_frames;
}

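/* Complete up to @budget descriptors on an XSk send queue. When no XDP_TX
 * frames are pending on the ring, everything completed belongs to the
 * pool and cleaning reduces to advancing the NTC pointer; otherwise, fall
 * back to the per-SQE slow path.
 */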
static __always_inline u32 idpf_xsksq_complete(void *_xdpsq, u32 budget)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	u32 done_frames;
	u32 xsk_frames;

	done_frames = idpf_xdpsq_poll(xdpsq, budget);
	if (unlikely(!done_frames))
		return 0;

	if (likely(!xdpsq->xdp_tx)) {
		tx_ntc += done_frames;
		if (tx_ntc >= tx_cnt)
			tx_ntc -= tx_cnt;

		xdpsq->next_to_clean = tx_ntc;
		xsk_frames = done_frames;

		goto finalize;
	}

	xsk_frames = idpf_xsksq_complete_slow(xdpsq, done_frames);
	if (xsk_frames)
finalize:
		xsk_tx_completed(xdpsq->pool, xsk_frames);

	xdpsq->pending -= done_frames;

	return done_frames;
}

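/* Prepare an XSk queue for %XDP_TX xmit: take the lock, reclaim
 * descriptors if the number of free ones is below the threshold, and
 * fill in the generic &libeth_xdpsq structure.
 */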
static u32 idpf_xsk_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 free;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	free = xdpsq->desc_count - xdpsq->pending;
	if (free < xdpsq->thresh)
		free += idpf_xsksq_complete(xdpsq, xdpsq->thresh);

	*sq = (struct libeth_xdpsq){
		.pool		= xdpsq->pool,
		.sqes		= xdpsq->tx_buf,
		.descs		= xdpsq->desc_ring,
		.count		= xdpsq->desc_count,
		.lock		= &xdpsq->xdp_lock,
		.ntu		= &xdpsq->next_to_use,
		.pending	= &xdpsq->pending,
		.xdp_tx		= &xdpsq->xdp_tx,
	};

	return free;
}

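/* Prepare an XSk queue for xmit from the wakeup/NAPI path. The caller has
 * already locked and cleaned the queue, so only the &libeth_xdpsq fields
 * need to be filled here.
 */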
static u32 idpf_xsk_xmit_prep(void *_xdpsq, struct libeth_xdpsq *sq)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;

	*sq = (struct libeth_xdpsq){
		.pool		= xdpsq->pool,
		.sqes		= xdpsq->tx_buf,
		.descs		= xdpsq->desc_ring,
		.count		= xdpsq->desc_count,
		.lock		= &xdpsq->xdp_lock,
		.ntu		= &xdpsq->next_to_use,
		.pending	= &xdpsq->pending,
	};

	/*
	 * The queue is cleaned, the budget is already known, optimize out
	 * the second min() by passing the type limit.
	 */
	return U32_MAX;
}

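/**
 * idpf_xsk_xmit - send frames queued on an XSk pool
 * @xsksq: XSk send queue to process
 *
 * Reclaim finished descriptors first if the queue is running low, then
 * transmit up to one threshold worth of frames from the pool.
 *
 * Return: the libeth_xsk_xmit_do_bulk() result, %true when there is no
 * more xmit work to be done.
 */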
bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq)
{
	u32 free;

	libeth_xdpsq_lock(&xsksq->xdp_lock);

	free = xsksq->desc_count - xsksq->pending;
	if (free < xsksq->thresh)
		free += idpf_xsksq_complete(xsksq, xsksq->thresh);

	return libeth_xsk_xmit_do_bulk(xsksq->pool, xsksq,
				       min(free - 1, xsksq->thresh),
				       libeth_xsktmo, idpf_xsk_xmit_prep,
				       idpf_xdp_tx_xmit, idpf_xdp_tx_finalize);
}

LIBETH_XDP_DEFINE_START();
LIBETH_XDP_DEFINE_TIMER(static idpf_xsk_tx_timer, idpf_xsksq_complete);
LIBETH_XSK_DEFINE_FLUSH_TX(static idpf_xsk_tx_flush_bulk, idpf_xsk_tx_prep,
			   idpf_xdp_tx_xmit);
LIBETH_XSK_DEFINE_RUN(static idpf_xsk_run_pass, idpf_xsk_run_prog,
		      idpf_xsk_tx_flush_bulk, idpf_rx_process_skb_fields);
LIBETH_XSK_DEFINE_FINALIZE(static idpf_xsk_finalize_rx, idpf_xsk_tx_flush_bulk,
			   idpf_xdp_tx_finalize);
LIBETH_XDP_DEFINE_END();

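/* Initialize one buffer queue descriptor: the buffer ID is just the index
 * of the frame in the fill queue array, plus the frame's DMA address.
 */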
static void idpf_xskfqe_init(const struct libeth_xskfq_fp *fq, u32 i)
{
	struct virtchnl2_splitq_rx_buf_desc *desc = fq->descs;

	desc = &desc[i];
#ifdef __LIBETH_WORD_ACCESS
	*(u64 *)&desc->qword0 = i;
#else
	desc->qword0.buf_id = cpu_to_le16(i);
#endif
	desc->pkt_addr = cpu_to_le64(libeth_xsk_buff_xdp_get_dma(fq->fqes[i]));
}

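/* Post @count buffers from the pool to the buffer queue and notify HW by
 * bumping the tail.
 */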
static bool idpf_xskfq_refill_thresh(struct idpf_buf_queue *bufq, u32 count)
{
	struct libeth_xskfq_fp fq = {
		.pool	= bufq->pool,
		.fqes	= bufq->xsk_buf,
		.descs	= bufq->split_buf,
		.ntu	= bufq->next_to_use,
		.count	= bufq->desc_count,
	};
	u32 done;

	done = libeth_xskfqe_alloc(&fq, count, idpf_xskfqe_init);
	writel(fq.ntu, bufq->tail);

	bufq->next_to_use = fq.ntu;
	bufq->pending -= done;

	return done == count;
}

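/* Refill a buffer queue in threshold-sized batches, keeping one slot
 * reserved so that the producer index never catches up with the consumer
 * one.
 */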
static bool idpf_xskfq_refill(struct idpf_buf_queue *bufq)
{
	u32 count, rx_thresh = bufq->thresh;

	count = ALIGN_DOWN(bufq->pending - 1, rx_thresh);

	for (u32 i = 0; i < count; i += rx_thresh) {
		if (unlikely(!idpf_xskfq_refill_thresh(bufq, rx_thresh)))
			return false;
	}

	return true;
}

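/**
 * idpf_xskfq_init - set up the XSk fill queue for a buffer queue
 * @bufq: buffer queue to set up
 *
 * Create the libeth XSk fill queue, do the initial refill, and disable
 * header split for the queue.
 *
 * Return: 0 on success (a failed initial refill is only logged), -errno
 * on failure to create the fill queue.
 */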
int idpf_xskfq_init(struct idpf_buf_queue *bufq)
{
	struct libeth_xskfq fq = {
		.pool	= bufq->pool,
		.count	= bufq->desc_count,
		.nid	= idpf_q_vector_to_mem(bufq->q_vector),
	};
	int ret;

	ret = libeth_xskfq_create(&fq);
	if (ret)
		return ret;

	bufq->xsk_buf = fq.fqes;
	bufq->pending = fq.pending;
	bufq->thresh = fq.thresh;
	bufq->rx_buf_size = fq.buf_len;

	if (!idpf_xskfq_refill(bufq))
		netdev_err(bufq->pool->netdev,
			   "failed to allocate XSk buffers for qid %u\n",
			   bufq->pool->queue_id);

	bufq->next_to_alloc = bufq->next_to_use;

	idpf_queue_clear(HSPLIT_EN, bufq);
	bufq->rx_hbuf_size = 0;

	return 0;
}

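/**
 * idpf_xskfq_rel - release the XSk fill queue of a buffer queue
 * @bufq: buffer queue to clean up
 *
 * Destroy the fill queue and write the buffer size, threshold, and
 * pending count libeth leaves in the structure back to the queue.
 */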
void idpf_xskfq_rel(struct idpf_buf_queue *bufq)
{
	struct libeth_xskfq fq = {
		.fqes	= bufq->xsk_buf,
	};

	libeth_xskfq_destroy(&fq);

	bufq->rx_buf_size = fq.buf_len;
	bufq->thresh = fq.thresh;
	bufq->pending = fq.pending;
}

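/**
 * struct idpf_xskfq_refill_set - a set of buffer queues to refill
 * @bufqs: array of refill states, one per buffer queue of an Rx queue
 * @bufqs.q: buffer queue to refill, %NULL if not used during the poll
 * @bufqs.buf_id: ID of the last buffer taken from the queue
 * @bufqs.pending: number of buffers consumed during the poll
 */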
struct idpf_xskfq_refill_set {
	struct {
		struct idpf_buf_queue	*q;
		u32			buf_id;
		u32			pending;
	} bufqs[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};

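/* Return the buffers consumed during a poll cycle to their queues: advance
 * each queue's NTC past the last seen buffer ID and refill the queues that
 * accumulated more than one threshold of free slots.
 */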
static bool idpf_xskfq_refill_set(const struct idpf_xskfq_refill_set *set)
{
	bool ret = true;

	for (u32 i = 0; i < ARRAY_SIZE(set->bufqs); i++) {
		struct idpf_buf_queue *bufq = set->bufqs[i].q;
		u32 ntc;

		if (!bufq)
			continue;

		ntc = set->bufqs[i].buf_id;
		if (unlikely(++ntc == bufq->desc_count))
			ntc = 0;

		bufq->next_to_clean = ntc;
		bufq->pending += set->bufqs[i].pending;

		if (bufq->pending > bufq->thresh)
			ret &= idpf_xskfq_refill(bufq);
	}

	return ret;
}

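/**
 * idpf_xskrq_poll - poll an XSk Rx queue
 * @rxq: queue to poll
 * @budget: maximum number of packets to process
 *
 * Walk the descriptor ring, build XSk buffers from the received frames,
 * run the XDP program on each complete frame, and refill the buffer
 * queues touched during the poll.
 *
 * Return: number of packets processed. @budget is returned on failure
 * when the pool doesn't use the need_wakeup mode, so that the NAPI poll
 * is rescheduled.
 */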
int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget)
{
	struct idpf_xskfq_refill_set set = { };
	struct libeth_rq_napi_stats rs = { };
	bool wake, gen, fail = false;
	u32 ntc = rxq->next_to_clean;
	struct libeth_xdp_buff *xdp;
	LIBETH_XDP_ONSTACK_BULK(bq);
	u32 cnt = rxq->desc_count;

	wake = xsk_uses_need_wakeup(rxq->pool);
	if (wake)
		xsk_clear_rx_need_wakeup(rxq->pool);

	gen = idpf_queue_has(GEN_CHK, rxq);

	libeth_xsk_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
				rxq->xdpsqs, rxq->num_xdp_txq);
	xdp = rxq->xsk;

	while (likely(rs.packets < budget)) {
		const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
		struct idpf_xdp_rx_desc desc __uninitialized;
		struct idpf_buf_queue *bufq;
		u32 bufq_id, buf_id;

		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;

		idpf_xdp_get_qw0(&desc, rx_desc);
		if (idpf_xdp_rx_gen(&desc) != gen)
			break;

		dma_rmb();

		bufq_id = idpf_xdp_rx_bufq(&desc);
		bufq = set.bufqs[bufq_id].q;
		if (!bufq) {
			bufq = &rxq->bufq_sets[bufq_id].bufq;
			set.bufqs[bufq_id].q = bufq;
		}

		idpf_xdp_get_qw1(&desc, rx_desc);
		buf_id = idpf_xdp_rx_buf(&desc);

		set.bufqs[bufq_id].buf_id = buf_id;
		set.bufqs[bufq_id].pending++;

		xdp = libeth_xsk_process_buff(xdp, bufq->xsk_buf[buf_id],
					      idpf_xdp_rx_len(&desc));

		if (unlikely(++ntc == cnt)) {
			ntc = 0;
			gen = !gen;
			idpf_queue_change(GEN_CHK, rxq);
		}

		if (!idpf_xdp_rx_eop(&desc) || unlikely(!xdp))
			continue;

		fail = !idpf_xsk_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
		xdp = NULL;

		if (fail)
			break;
	}

	idpf_xsk_finalize_rx(&bq);

	rxq->next_to_clean = ntc;
	rxq->xsk = xdp;

	fail |= !idpf_xskfq_refill_set(&set);

	u64_stats_update_begin(&rxq->stats_sync);
	u64_stats_add(&rxq->q_stats.packets, rs.packets);
	u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
	u64_stats_update_end(&rxq->stats_sync);

	if (!wake)
		return unlikely(fail) ? budget : rs.packets;

	if (unlikely(fail))
		xsk_set_rx_need_wakeup(rxq->pool);

	return rs.packets;
}

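/**
 * idpf_xsk_pool_setup - set up an XSk buffer pool for a queue pair
 * @vport: vport to set up the pool on
 * @bpf: &netdev_bpf command carrying the pool and the queue ID
 *
 * Check that the pool's frame size is aligned to the HW buffer stride,
 * quiesce the queue pair if the interface is running, install or remove
 * the pool, and bring the pair back up.
 *
 * Return: 0 on success, -errno on failure.
 */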
int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *bpf)
{
	struct xsk_buff_pool *pool = bpf->xsk.pool;
	u32 qid = bpf->xsk.queue_id;
	bool restart;
	int ret;

	if (pool && !IS_ALIGNED(xsk_pool_get_rx_frame_size(pool),
				LIBETH_RX_BUF_STRIDE)) {
		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
				       "%s: HW doesn't support frame sizes not aligned to %u (qid %u: %u)",
				       netdev_name(vport->netdev),
				       LIBETH_RX_BUF_STRIDE, qid,
				       xsk_pool_get_rx_frame_size(pool));
		return -EINVAL;
	}

	restart = idpf_xdp_enabled(vport) && netif_running(vport->netdev);
	if (!restart)
		goto pool;

	ret = idpf_qp_switch(vport, qid, false);
	if (ret) {
		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
				       "%s: failed to disable queue pair %u: %pe",
				       netdev_name(vport->netdev), qid,
				       ERR_PTR(ret));
		return ret;
	}

pool:
	ret = libeth_xsk_setup_pool(vport->netdev, qid, pool);
	if (ret) {
		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
				       "%s: failed to configure XSk pool for pair %u: %pe",
				       netdev_name(vport->netdev), qid,
				       ERR_PTR(ret));
		return ret;
	}

	if (!restart)
		return 0;

	ret = idpf_qp_switch(vport, qid, true);
	if (ret) {
		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
				       "%s: failed to enable queue pair %u: %pe",
				       netdev_name(vport->netdev), qid,
				       ERR_PTR(ret));
		goto err_dis;
	}

	return 0;

err_dis:
	libeth_xsk_setup_pool(vport->netdev, qid, false);

	return ret;
}

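/**
 * idpf_xsk_wakeup - .ndo_xsk_wakeup callback implementation
 * @dev: netdev the wakeup was requested on
 * @qid: ID of the queue to wake
 * @flags: wakeup flags (the queue vector serves both Rx and Tx, so they
 *	   are not checked)
 *
 * Return: 0 on success, -errno if the wakeup can't be performed at the
 * moment (reset in progress, link down, XDP or XSk xmit not set up).
 */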
int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	const struct idpf_netdev_priv *np = netdev_priv(dev);
	const struct idpf_vport *vport = np->vport;
	struct idpf_q_vector *q_vector;

	if (unlikely(idpf_vport_ctrl_is_locked(dev)))
		return -EBUSY;

	if (unlikely(!vport->link_up))
		return -ENETDOWN;

	if (unlikely(!vport->num_xdp_txq))
		return -ENXIO;

	q_vector = idpf_find_rxq_vec(vport, qid);
	if (unlikely(!q_vector->xsksq))
		return -ENXIO;

	libeth_xsk_wakeup(&q_vector->csd, qid);

	return 0;
}