// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include <net/libeth/xsk.h>

#include "idpf.h"
#include "xdp.h"
#include "xsk.h"

static void idpf_xsk_tx_timer(struct work_struct *work);

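/* Bind the XSk pool registered for this queue's index to an XDP Tx queue:
 * install the pool, initialize the completion timer with the XSk handler,
 * assign the NOIRQ flag from xsk_uses_need_wakeup() and mark the queue as
 * XSk-enabled.
 */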
static void idpf_xsk_setup_txq(const struct idpf_vport *vport,
			       struct idpf_tx_queue *txq)
{
	struct xsk_buff_pool *pool;
	u32 qid;

	idpf_queue_clear(XSK, txq);

	if (!idpf_queue_has(XDP, txq))
		return;

	qid = txq->idx - vport->xdp_txq_offset;

	pool = xsk_get_pool_from_qid(vport->netdev, qid);
	if (!pool || !pool->dev)
		return;

	txq->pool = pool;
	libeth_xdpsq_init_timer(txq->timer, txq, &txq->xdp_lock,
				idpf_xsk_tx_timer);

	idpf_queue_assign(NOIRQ, txq, xsk_uses_need_wakeup(pool));
	idpf_queue_set(XSK, txq);
}

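/* Mark a completion queue as XSk-enabled if an XSk pool is registered for
 * the XDP Tx queue it serves.
 */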
static void idpf_xsk_setup_complq(const struct idpf_vport *vport,
				  struct idpf_compl_queue *complq)
{
	const struct xsk_buff_pool *pool;
	u32 qid;

	idpf_queue_clear(XSK, complq);

	if (!idpf_queue_has(XDP, complq))
		return;

	qid = complq->txq_grp->txqs[0]->idx - vport->xdp_txq_offset;

	pool = xsk_get_pool_from_qid(vport->netdev, qid);
	if (!pool || !pool->dev)
		return;

	idpf_queue_set(XSK, complq);
}

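/**
 * idpf_xsk_setup_queue - set up XSk state on a queue
 * @vport: vport the queue belongs to
 * @q: queue to set up, interpreted according to @type
 * @type: type of the queue, only Tx and Tx completion queues are handled
 *
 * Does nothing if XDP is not enabled on the vport.
 */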
void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
			  enum virtchnl2_queue_type type)
{
	if (!idpf_xdp_enabled(vport))
		return;

	switch (type) {
	case VIRTCHNL2_QUEUE_TYPE_TX:
		idpf_xsk_setup_txq(vport, q);
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
		idpf_xsk_setup_complq(vport, q);
		break;
	default:
		break;
	}
}

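/**
 * idpf_xsk_clear_queue - tear down XSk state on a queue
 * @q: queue to clear, interpreted according to @type
 * @type: type of the queue, only Tx and Tx completion queues are handled
 *
 * For Tx queues, the NOIRQ flag is set back and ->dev is reset to the
 * netdev's parent device.
 */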
void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type)
{
	struct idpf_compl_queue *complq;
	struct idpf_tx_queue *txq;

	switch (type) {
	case VIRTCHNL2_QUEUE_TYPE_TX:
		txq = q;
		if (!idpf_queue_has_clear(XSK, txq))
			return;

		idpf_queue_set(NOIRQ, txq);
		txq->dev = txq->netdev->dev.parent;
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
		complq = q;
		idpf_queue_clear(XSK, complq);
		break;
	default:
		break;
	}
}

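/**
 * idpf_xsksq_clean - clean all pending descriptors on an XSk send queue
 * @xdpsq: queue to clean
 *
 * Walks the ring from the clean to the use pointer: SQEs with a buffer type
 * set are completed via libeth_xdp_complete_tx(), the rest are counted as
 * XSk frames and returned to the pool with xsk_tx_completed().
 */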
void idpf_xsksq_clean(struct idpf_tx_queue *xdpsq)
{
	struct libeth_xdpsq_napi_stats ss = { };
	u32 ntc = xdpsq->next_to_clean;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->pool->dev,
		.bq	= &bq,
		.xss	= &ss,
	};
	u32 xsk_frames = 0;

	xdp_frame_bulk_init(&bq);

	while (ntc != xdpsq->next_to_use) {
		struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];

		if (sqe->type)
			libeth_xdp_complete_tx(sqe, &cp);
		else
			xsk_frames++;

		if (unlikely(++ntc == xdpsq->desc_count))
			ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	if (xsk_frames)
		xsk_tx_completed(xdpsq->pool, xsk_frames);
}

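/* Slow completion path taken when the queue carries XDP Tx frames in
 * addition to XSk xmit descriptors: walk the SQEs one by one, complete XDP
 * buffers via libeth and count plain XSk frames for xsk_tx_completed().
 */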
static noinline u32 idpf_xsksq_complete_slow(struct idpf_tx_queue *xdpsq,
					     u32 done)
{
	struct libeth_xdpsq_napi_stats ss = { };
	u32 ntc = xdpsq->next_to_clean;
	u32 cnt = xdpsq->desc_count;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->pool->dev,
		.bq	= &bq,
		.xss	= &ss,
		.napi	= true,
	};
	u32 xsk_frames = 0;

	xdp_frame_bulk_init(&bq);

	for (u32 i = 0; likely(i < done); i++) {
		struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];

		if (sqe->type)
			libeth_xdp_complete_tx(sqe, &cp);
		else
			xsk_frames++;

		if (unlikely(++ntc == cnt))
			ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	xdpsq->next_to_clean = ntc;
	xdpsq->xdp_tx -= cp.xdp_tx;

	return xsk_frames;
}

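/* Complete descriptors on an XSk send queue. When no XDP frames are
 * pending, every completed descriptor is a plain XSk frame and the clean
 * pointer can simply be advanced; otherwise, fall back to the slow
 * per-descriptor path. Returns the number of completed descriptors.
 */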
static __always_inline u32 idpf_xsksq_complete(void *_xdpsq, u32 budget)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	u32 done_frames;
	u32 xsk_frames;

	done_frames = idpf_xdpsq_poll(xdpsq, budget);
	if (unlikely(!done_frames))
		return 0;

	if (likely(!xdpsq->xdp_tx)) {
		tx_ntc += done_frames;
		if (tx_ntc >= tx_cnt)
			tx_ntc -= tx_cnt;

		xdpsq->next_to_clean = tx_ntc;
		xsk_frames = done_frames;

		goto finalize;
	}

	xsk_frames = idpf_xsksq_complete_slow(xdpsq, done_frames);
	if (xsk_frames)
finalize:
		xsk_tx_completed(xdpsq->pool, xsk_frames);

	xdpsq->pending -= done_frames;

	return done_frames;
}

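/* Fill in a libeth_xdpsq describing this queue for libeth_xsk_xmit_do_bulk().
 */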
static u32 idpf_xsk_xmit_prep(void *_xdpsq, struct libeth_xdpsq *sq)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;

	*sq = (struct libeth_xdpsq){
		.pool		= xdpsq->pool,
		.sqes		= xdpsq->tx_buf,
		.descs		= xdpsq->desc_ring,
		.count		= xdpsq->desc_count,
		.lock		= &xdpsq->xdp_lock,
		.ntu		= &xdpsq->next_to_use,
		.pending	= &xdpsq->pending,
	};

	/*
	 * The queue is cleaned, the budget is already known, optimize out
	 * the second min() by passing the type limit.
	 */
	return U32_MAX;
}

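/**
 * idpf_xsk_xmit - send frames from an XSk pool on the given XSk send queue
 * @xsksq: XSk send queue to transmit on
 *
 * Completes descriptors first if the number of free slots dropped below the
 * queue threshold, then transmits a bounded batch of frames via libeth.
 *
 * Return: the result of libeth_xsk_xmit_do_bulk(), passed through as is.
 */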
bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq)
{
	u32 free;

	libeth_xdpsq_lock(&xsksq->xdp_lock);

	free = xsksq->desc_count - xsksq->pending;
	if (free < xsksq->thresh)
		free += idpf_xsksq_complete(xsksq, xsksq->thresh);

	return libeth_xsk_xmit_do_bulk(xsksq->pool, xsksq,
				       min(free - 1, xsksq->thresh),
				       libeth_xsktmo, idpf_xsk_xmit_prep,
				       idpf_xdp_tx_xmit, idpf_xdp_tx_finalize);
}

LIBETH_XDP_DEFINE_START();
LIBETH_XDP_DEFINE_TIMER(static idpf_xsk_tx_timer, idpf_xsksq_complete);
LIBETH_XDP_DEFINE_END();

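/**
 * idpf_xsk_pool_setup - set up or destroy an XSk buffer pool for a queue pair
 * @vport: vport to configure the pool on
 * @bpf: &netdev_bpf command carrying the pool pointer and the queue index
 *
 * When the interface is running with XDP enabled, the affected queue pair
 * is disabled first, the pool is (de)registered through libeth, and the
 * pair is brought back up; otherwise, only the pool registration changes.
 *
 * Return: 0 on success, -errno on failure (also reported via the extack).
 */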
int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *bpf)
{
	struct xsk_buff_pool *pool = bpf->xsk.pool;
	u32 qid = bpf->xsk.queue_id;
	bool restart;
	int ret;

	restart = idpf_xdp_enabled(vport) && netif_running(vport->netdev);
	if (!restart)
		goto pool;

	ret = idpf_qp_switch(vport, qid, false);
	if (ret) {
		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
				       "%s: failed to disable queue pair %u: %pe",
				       netdev_name(vport->netdev), qid,
				       ERR_PTR(ret));
		return ret;
	}

pool:
	ret = libeth_xsk_setup_pool(vport->netdev, qid, pool);
	if (ret) {
		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
				       "%s: failed to configure XSk pool for pair %u: %pe",
				       netdev_name(vport->netdev), qid,
				       ERR_PTR(ret));
		return ret;
	}

	if (!restart)
		return 0;

	ret = idpf_qp_switch(vport, qid, true);
	if (ret) {
		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
				       "%s: failed to enable queue pair %u: %pe",
				       netdev_name(vport->netdev), qid,
				       ERR_PTR(ret));
		goto err_dis;
	}

	return 0;

err_dis:
	libeth_xsk_setup_pool(vport->netdev, qid, false);

	return ret;
}