// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include <net/libeth/xsk.h>

#include "idpf.h"
#include "xdp.h"
#include "xsk.h"

static void idpf_xsk_tx_timer(struct work_struct *work);

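/**
 * idpf_xsk_setup_rxq - attach an XSk buffer pool to an Rx queue
 * @vport: vport the queue belongs to
 * @rxq: Rx queue to try to switch to XSk mode
 *
 * If a usable pool is registered for this queue index, save it on the
 * queue and mark the queue as XSk-enabled.
 */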
static void idpf_xsk_setup_rxq(const struct idpf_vport *vport,
			       struct idpf_rx_queue *rxq)
{
	struct xsk_buff_pool *pool;

	pool = xsk_get_pool_from_qid(vport->netdev, rxq->idx);
	if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
		return;

	rxq->pool = pool;

	idpf_queue_set(XSK, rxq);
}

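/**
 * idpf_xsk_setup_bufq - attach an XSk buffer pool to a buffer queue
 * @vport: vport the queue belongs to
 * @bufq: buffer queue to try to switch to XSk mode
 *
 * Find the Rx queue group owning this buffer queue and look up the pool by
 * the index of the group's first Rx queue. If the pool is usable, save it
 * on the queue and mark the queue as XSk-enabled.
 */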
static void idpf_xsk_setup_bufq(const struct idpf_vport *vport,
				struct idpf_buf_queue *bufq)
{
	const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	struct xsk_buff_pool *pool;
	u32 qid = U32_MAX;

	for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
		const struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];

		for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
			if (&grp->splitq.bufq_sets[j].bufq == bufq) {
				qid = grp->splitq.rxq_sets[0]->rxq.idx;
				goto setup;
			}
		}
	}

setup:
	pool = xsk_get_pool_from_qid(vport->netdev, qid);
	if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
		return;

	bufq->pool = pool;

	idpf_queue_set(XSK, bufq);
}

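/**
 * idpf_xsk_setup_txq - attach an XSk buffer pool to an XDP Tx queue
 * @vport: vport the queue belongs to
 * @txq: Tx queue to try to switch to XSk mode
 *
 * Only XDP Tx queues can be switched. The pool is looked up by the queue
 * index relative to the XDP SQ offset. On success, the cleaning timer is
 * initialized and the NOIRQ flag follows the pool's need_wakeup setting.
 */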
static void idpf_xsk_setup_txq(const struct idpf_vport *vport,
			       struct idpf_tx_queue *txq)
{
	struct xsk_buff_pool *pool;
	u32 qid;

	idpf_queue_clear(XSK, txq);

	if (!idpf_queue_has(XDP, txq))
		return;

	qid = txq->idx - vport->dflt_qv_rsrc.xdp_txq_offset;

	pool = xsk_get_pool_from_qid(vport->netdev, qid);
	if (!pool || !pool->dev)
		return;

	txq->pool = pool;
	libeth_xdpsq_init_timer(txq->timer, txq, &txq->xdp_lock,
				idpf_xsk_tx_timer);

	idpf_queue_assign(NOIRQ, txq, xsk_uses_need_wakeup(pool));
	idpf_queue_set(XSK, txq);
}

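/**
 * idpf_xsk_setup_complq - mark a completion queue as XSk-enabled
 * @vport: vport the queue belongs to
 * @complq: completion queue to try to switch to XSk mode
 *
 * Only XDP completion queues are considered. The XSK flag is set when a
 * pool is registered for the queue pair this completion queue serves.
 */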
static void idpf_xsk_setup_complq(const struct idpf_vport *vport,
				  struct idpf_compl_queue *complq)
{
	const struct xsk_buff_pool *pool;
	u32 qid;

	idpf_queue_clear(XSK, complq);

	if (!idpf_queue_has(XDP, complq))
		return;

	qid = complq->txq_grp->txqs[0]->idx -
		vport->dflt_qv_rsrc.xdp_txq_offset;

	pool = xsk_get_pool_from_qid(vport->netdev, qid);
	if (!pool || !pool->dev)
		return;

	idpf_queue_set(XSK, complq);
}

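/**
 * idpf_xsk_setup_queue - try to switch a queue to XSk mode
 * @vport: vport the queue belongs to
 * @q: queue to switch
 * @type: virtchnl queue type of @q
 *
 * No-op if XDP is not enabled on the vport or the queue type does not
 * participate in AF_XDP.
 */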
void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
			  enum virtchnl2_queue_type type)
{
	if (!idpf_xdp_enabled(vport))
		return;

	switch (type) {
	case VIRTCHNL2_QUEUE_TYPE_RX:
		idpf_xsk_setup_rxq(vport, q);
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		idpf_xsk_setup_bufq(vport, q);
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX:
		idpf_xsk_setup_txq(vport, q);
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
		idpf_xsk_setup_complq(vport, q);
		break;
	default:
		break;
	}
}

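/**
 * idpf_xsk_clear_queue - switch a queue back from XSk mode
 * @q: queue to switch
 * @type: virtchnl queue type of @q
 *
 * Clear the XSK flag and detach the pool. Tx queues additionally get the
 * NOIRQ flag set back and their DMA device pointer restored.
 */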
void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type)
{
	struct idpf_compl_queue *complq;
	struct idpf_buf_queue *bufq;
	struct idpf_rx_queue *rxq;
	struct idpf_tx_queue *txq;

	switch (type) {
	case VIRTCHNL2_QUEUE_TYPE_RX:
		rxq = q;
		if (!idpf_queue_has_clear(XSK, rxq))
			return;

		rxq->pool = NULL;
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		bufq = q;
		if (!idpf_queue_has_clear(XSK, bufq))
			return;

		bufq->pool = NULL;
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX:
		txq = q;
		if (!idpf_queue_has_clear(XSK, txq))
			return;

		idpf_queue_set(NOIRQ, txq);
		txq->dev = txq->netdev->dev.parent;
		break;
	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
		complq = q;
		idpf_queue_clear(XSK, complq);
		break;
	default:
		break;
	}
}

void idpf_xsk_init_wakeup(struct idpf_q_vector *qv)
{
	libeth_xsk_init_wakeup(&qv->csd, &qv->napi);
}

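/**
 * idpf_xsksq_clean - free all buffers left on an XSk Tx queue
 * @xdpsq: XDP Tx queue in XSk mode to clean
 *
 * Walk the ring from next_to_clean to next_to_use without polling for
 * completions, freeing frame-backed SQEs and returning plain XSk
 * descriptors to the pool via xsk_tx_completed().
 */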
void idpf_xsksq_clean(struct idpf_tx_queue *xdpsq)
{
	struct libeth_xdpsq_napi_stats ss = { };
	u32 ntc = xdpsq->next_to_clean;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->pool->dev,
		.bq	= &bq,
		.xss	= &ss,
	};
	u32 xsk_frames = 0;

	xdp_frame_bulk_init(&bq);

	while (ntc != xdpsq->next_to_use) {
		struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];

		if (sqe->type)
			libeth_xdp_complete_tx(sqe, &cp);
		else
			xsk_frames++;

		if (unlikely(++ntc == xdpsq->desc_count))
			ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	if (xsk_frames)
		xsk_tx_completed(xdpsq->pool, xsk_frames);
}

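/**
 * idpf_xsksq_complete_slow - complete an XSk Tx queue carrying XDP_TX buffers
 * @xdpsq: XDP Tx queue in XSk mode to complete
 * @done: number of descriptors completed by HW
 *
 * Slow path taken when ``XDP_TX`` buffers are mixed in with plain XSk xmit
 * descriptors and each SQE must be checked individually.
 *
 * Return: number of completed descriptors belonging to the XSk pool.
 */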
static noinline u32 idpf_xsksq_complete_slow(struct idpf_tx_queue *xdpsq,
					     u32 done)
{
	struct libeth_xdpsq_napi_stats ss = { };
	u32 ntc = xdpsq->next_to_clean;
	u32 cnt = xdpsq->desc_count;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->pool->dev,
		.bq	= &bq,
		.xss	= &ss,
		.napi	= true,
	};
	u32 xsk_frames = 0;

	xdp_frame_bulk_init(&bq);

	for (u32 i = 0; likely(i < done); i++) {
		struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];

		if (sqe->type)
			libeth_xdp_complete_tx(sqe, &cp);
		else
			xsk_frames++;

		if (unlikely(++ntc == cnt))
			ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	xdpsq->next_to_clean = ntc;
	xdpsq->xdp_tx -= cp.xdp_tx;

	return xsk_frames;
}

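/**
 * idpf_xsksq_complete - poll Tx completions for an XSk queue
 * @_xdpsq: XDP Tx queue in XSk mode
 * @budget: maximum number of descriptors to complete
 *
 * In the common case, when no ``XDP_TX`` buffers are on the ring, every
 * completed descriptor belongs to the pool and only the counters need
 * updating; otherwise, fall back to the slow per-SQE path.
 *
 * Return: number of descriptors completed.
 */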
static __always_inline u32 idpf_xsksq_complete(void *_xdpsq, u32 budget)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	u32 done_frames;
	u32 xsk_frames;

	done_frames = idpf_xdpsq_poll(xdpsq, budget);
	if (unlikely(!done_frames))
		return 0;

	if (likely(!xdpsq->xdp_tx)) {
		tx_ntc += done_frames;
		if (tx_ntc >= tx_cnt)
			tx_ntc -= tx_cnt;

		xdpsq->next_to_clean = tx_ntc;
		xsk_frames = done_frames;

		goto finalize;
	}

	xsk_frames = idpf_xsksq_complete_slow(xdpsq, done_frames);
	if (xsk_frames)
finalize:
		xsk_tx_completed(xdpsq->pool, xsk_frames);

	xdpsq->pending -= done_frames;

	return done_frames;
}

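/**
 * idpf_xsk_tx_prep - prepare an XSk queue for sending ``XDP_TX`` frames
 * @_xdpsq: XDP Tx queue in XSk mode
 * @sq: libeth SQ description to fill
 *
 * Take the SQ lock, trigger a completion cycle if the number of free
 * descriptors fell below the threshold, and describe the ring to libeth.
 *
 * Return: number of free descriptors on the ring.
 */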
static u32 idpf_xsk_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 free;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	free = xdpsq->desc_count - xdpsq->pending;
	if (free < xdpsq->thresh)
		free += idpf_xsksq_complete(xdpsq, xdpsq->thresh);

	*sq = (struct libeth_xdpsq){
		.pool		= xdpsq->pool,
		.sqes		= xdpsq->tx_buf,
		.descs		= xdpsq->desc_ring,
		.count		= xdpsq->desc_count,
		.lock		= &xdpsq->xdp_lock,
		.ntu		= &xdpsq->next_to_use,
		.pending	= &xdpsq->pending,
		.xdp_tx		= &xdpsq->xdp_tx,
	};

	return free;
}

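/**
 * idpf_xsk_xmit_prep - prepare an XSk queue for XSk xmit
 * @_xdpsq: XSk Tx queue
 * @sq: libeth SQ description to fill
 *
 * The queue was already cleaned and locked by idpf_xsk_xmit(), so only the
 * ring description needs to be filled in here.
 *
 * Return: U32_MAX, so that the caller's min() against the real budget is
 * a no-op.
 */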
static u32 idpf_xsk_xmit_prep(void *_xdpsq, struct libeth_xdpsq *sq)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;

	*sq = (struct libeth_xdpsq){
		.pool		= xdpsq->pool,
		.sqes		= xdpsq->tx_buf,
		.descs		= xdpsq->desc_ring,
		.count		= xdpsq->desc_count,
		.lock		= &xdpsq->xdp_lock,
		.ntu		= &xdpsq->next_to_use,
		.pending	= &xdpsq->pending,
	};

	/*
	 * The queue is cleaned, the budget is already known, optimize out
	 * the second min() by passing the type limit.
	 */
	return U32_MAX;
}

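/**
 * idpf_xsk_xmit - send frames queued on an XSk pool's Tx ring
 * @xsksq: XSk Tx queue to use
 *
 * Complete pending descriptors if the queue is running low on free ones,
 * then let libeth transfer frames from the pool's Tx ring to the HW ring.
 *
 * Return: true if no frames are left to send, false otherwise.
 */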
bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq)
{
	u32 free;

	libeth_xdpsq_lock(&xsksq->xdp_lock);

	free = xsksq->desc_count - xsksq->pending;
	if (free < xsksq->thresh)
		free += idpf_xsksq_complete(xsksq, xsksq->thresh);

	return libeth_xsk_xmit_do_bulk(xsksq->pool, xsksq,
				       min(free - 1, xsksq->thresh),
				       libeth_xsktmo, idpf_xsk_xmit_prep,
				       idpf_xdp_tx_xmit, idpf_xdp_tx_finalize);
}

LIBETH_XDP_DEFINE_START();
LIBETH_XDP_DEFINE_TIMER(static idpf_xsk_tx_timer, idpf_xsksq_complete);
LIBETH_XSK_DEFINE_FLUSH_TX(static idpf_xsk_tx_flush_bulk, idpf_xsk_tx_prep,
			   idpf_xdp_tx_xmit);
LIBETH_XSK_DEFINE_RUN(static idpf_xsk_run_pass, idpf_xsk_run_prog,
		      idpf_xsk_tx_flush_bulk, idpf_rx_process_skb_fields);
LIBETH_XSK_DEFINE_FINALIZE(static idpf_xsk_finalize_rx, idpf_xsk_tx_flush_bulk,
			   idpf_xdp_tx_finalize);
LIBETH_XDP_DEFINE_END();

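/**
 * idpf_xskfqe_init - initialize an Rx buffer descriptor with an XSk buffer
 * @fq: libeth fill queue description
 * @i: index of the buffer/descriptor to initialize
 *
 * Write the buffer ID and the buffer's DMA address to the splitq Rx buffer
 * descriptor.
 */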
static void idpf_xskfqe_init(const struct libeth_xskfq_fp *fq, u32 i)
{
	struct virtchnl2_splitq_rx_buf_desc *desc = fq->descs;

	desc = &desc[i];
#ifdef __LIBETH_WORD_ACCESS
	*(u64 *)&desc->qword0 = i;
#else
	desc->qword0.buf_id = cpu_to_le16(i);
#endif
	desc->pkt_addr = cpu_to_le64(libeth_xsk_buff_xdp_get_dma(fq->fqes[i]));
}

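/**
 * idpf_xskfq_refill_thresh - post a batch of XSk buffers to a buffer queue
 * @bufq: buffer queue to refill
 * @count: number of buffers to post
 *
 * Allocate buffers from the pool, initialize the corresponding descriptors,
 * and notify HW by bumping the tail.
 *
 * Return: true if all @count buffers were posted, false if the pool ran
 * out of buffers.
 */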
static bool idpf_xskfq_refill_thresh(struct idpf_buf_queue *bufq, u32 count)
{
	struct libeth_xskfq_fp fq = {
		.pool	= bufq->pool,
		.fqes	= bufq->xsk_buf,
		.descs	= bufq->split_buf,
		.ntu	= bufq->next_to_use,
		.count	= bufq->desc_count,
	};
	u32 done;

	done = libeth_xskfqe_alloc(&fq, count, idpf_xskfqe_init);
	writel(fq.ntu, bufq->tail);

	bufq->next_to_use = fq.ntu;
	bufq->pending -= done;

	return done == count;
}

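/**
 * idpf_xskfq_refill - refill a buffer queue with XSk buffers
 * @bufq: buffer queue to refill
 *
 * Post the pending buffers in threshold-sized batches, always leaving at
 * least one descriptor unused.
 *
 * Return: true on success, false if an allocation failed.
 */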
static bool idpf_xskfq_refill(struct idpf_buf_queue *bufq)
{
	u32 count, rx_thresh = bufq->thresh;

	count = ALIGN_DOWN(bufq->pending - 1, rx_thresh);

	for (u32 i = 0; i < count; i += rx_thresh) {
		if (unlikely(!idpf_xskfq_refill_thresh(bufq, rx_thresh)))
			return false;
	}

	return true;
}

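/**
 * idpf_xskfq_init - create an XSk fill queue for a buffer queue
 * @bufq: buffer queue to initialize
 *
 * Allocate the XSk buffer array via libeth, perform the initial refill,
 * and disable header split on the queue.
 *
 * Return: 0 on success, -errno if the libeth FQ could not be created.
 */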
int idpf_xskfq_init(struct idpf_buf_queue *bufq)
{
	struct libeth_xskfq fq = {
		.pool	= bufq->pool,
		.count	= bufq->desc_count,
		.nid	= idpf_q_vector_to_mem(bufq->q_vector),
	};
	int ret;

	ret = libeth_xskfq_create(&fq);
	if (ret)
		return ret;

	bufq->xsk_buf = fq.fqes;
	bufq->pending = fq.pending;
	bufq->thresh = fq.thresh;
	bufq->rx_buf_size = fq.buf_len;
	bufq->truesize = fq.truesize;

	if (!idpf_xskfq_refill(bufq))
		netdev_err(bufq->pool->netdev,
			   "failed to allocate XSk buffers for qid %d\n",
			   bufq->pool->queue_id);

	bufq->next_to_alloc = bufq->next_to_use;

	idpf_queue_clear(HSPLIT_EN, bufq);
	bufq->rx_hbuf_size = 0;

	return 0;
}

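/**
 * idpf_xskfq_rel - release an XSk fill queue
 * @bufq: buffer queue to clean up
 *
 * Free the XSk buffer array and reset the buffer size, threshold and
 * pending counters to the values reported back by libeth.
 */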
void idpf_xskfq_rel(struct idpf_buf_queue *bufq)
{
	struct libeth_xskfq fq = {
		.fqes	= bufq->xsk_buf,
	};

	libeth_xskfq_destroy(&fq);

	bufq->rx_buf_size = fq.buf_len;
	bufq->thresh = fq.thresh;
	bufq->pending = fq.pending;
}

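/**
 * struct idpf_xskfq_refill_set - buffer queues to refill after an Rx poll
 * @bufqs: per-bufq consumption state collected while polling
 * @bufqs.q: buffer queue, %NULL if no buffers were taken from it
 * @bufqs.buf_id: ID of the last buffer consumed from the queue
 * @bufqs.pending: number of buffers consumed and pending to be refilled
 */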
struct idpf_xskfq_refill_set {
	struct {
		struct idpf_buf_queue	*q;
		u32			buf_id;
		u32			pending;
	} bufqs[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};

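/**
 * idpf_xskfq_refill_set - refill all buffer queues used during an Rx poll
 * @set: per-bufq consumption state collected while polling
 *
 * Advance each used buffer queue's next_to_clean past the last consumed
 * buffer, account the newly pending buffers, and refill the queues which
 * crossed the refill threshold.
 *
 * Return: true if all refills succeeded, false otherwise.
 */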
static bool idpf_xskfq_refill_set(const struct idpf_xskfq_refill_set *set)
{
	bool ret = true;

	for (u32 i = 0; i < ARRAY_SIZE(set->bufqs); i++) {
		struct idpf_buf_queue *bufq = set->bufqs[i].q;
		u32 ntc;

		if (!bufq)
			continue;

		ntc = set->bufqs[i].buf_id;
		if (unlikely(++ntc == bufq->desc_count))
			ntc = 0;

		bufq->next_to_clean = ntc;
		bufq->pending += set->bufqs[i].pending;

		if (bufq->pending > bufq->thresh)
			ret &= idpf_xskfq_refill(bufq);
	}

	return ret;
}

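/**
 * idpf_xskrq_poll - poll an XSk-enabled Rx queue
 * @rxq: Rx queue to poll
 * @budget: maximum number of packets to process
 *
 * Process received descriptors, build XSk buffers, run the XDP program on
 * each completed frame, refill the buffer queues, and update the queue
 * statistics and the pool's need_wakeup state.
 *
 * Return: number of packets processed; @budget is returned instead when a
 * failure occurred and need_wakeup is not in use, so that NAPI keeps
 * polling.
 */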
int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget)
{
	struct idpf_xskfq_refill_set set = { };
	struct libeth_rq_napi_stats rs = { };
	bool wake, gen, fail = false;
	u32 ntc = rxq->next_to_clean;
	struct libeth_xdp_buff *xdp;
	LIBETH_XDP_ONSTACK_BULK(bq);
	u32 cnt = rxq->desc_count;

	wake = xsk_uses_need_wakeup(rxq->pool);
	if (wake)
		xsk_clear_rx_need_wakeup(rxq->pool);

	gen = idpf_queue_has(GEN_CHK, rxq);

	libeth_xsk_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
				rxq->xdpsqs, rxq->num_xdp_txq);
	xdp = rxq->xsk;

	while (likely(rs.packets < budget)) {
		const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
		struct idpf_xdp_rx_desc desc __uninitialized;
		struct idpf_buf_queue *bufq;
		u32 bufq_id, buf_id;

		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;

		idpf_xdp_get_qw0(&desc, rx_desc);
		if (idpf_xdp_rx_gen(&desc) != gen)
			break;

		dma_rmb();

		bufq_id = idpf_xdp_rx_bufq(&desc);
		bufq = set.bufqs[bufq_id].q;
		if (!bufq) {
			bufq = &rxq->bufq_sets[bufq_id].bufq;
			set.bufqs[bufq_id].q = bufq;
		}

		idpf_xdp_get_qw1(&desc, rx_desc);
		buf_id = idpf_xdp_rx_buf(&desc);

		set.bufqs[bufq_id].buf_id = buf_id;
		set.bufqs[bufq_id].pending++;

		xdp = libeth_xsk_process_buff(xdp, bufq->xsk_buf[buf_id],
					      idpf_xdp_rx_len(&desc));

		if (unlikely(++ntc == cnt)) {
			ntc = 0;
			gen = !gen;
			idpf_queue_change(GEN_CHK, rxq);
		}

		if (!idpf_xdp_rx_eop(&desc) || unlikely(!xdp))
			continue;

		fail = !idpf_xsk_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
		xdp = NULL;

		if (fail)
			break;
	}

	idpf_xsk_finalize_rx(&bq);

	rxq->next_to_clean = ntc;
	rxq->xsk = xdp;

	fail |= !idpf_xskfq_refill_set(&set);

	u64_stats_update_begin(&rxq->stats_sync);
	u64_stats_add(&rxq->q_stats.packets, rs.packets);
	u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
	u64_stats_update_end(&rxq->stats_sync);

	if (!wake)
		return unlikely(fail) ? budget : rs.packets;

	if (unlikely(fail))
		xsk_set_rx_need_wakeup(rxq->pool);

	return rs.packets;
}

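/**
 * idpf_xsk_pool_setup - install or remove an XSk buffer pool for a queue
 * @vport: vport to configure
 * @bpf: &struct netdev_bpf command carrying the pool and the queue ID
 *
 * Validate the pool's Rx frame size, quiesce the affected queue pair if the
 * interface is up with XDP enabled, (un)register the pool via libeth, and
 * bring the queue pair back up. The pool is unregistered again if the
 * queue pair cannot be re-enabled.
 *
 * Return: 0 on success, -errno on failure.
 */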
int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *bpf)
{
	struct xsk_buff_pool *pool = bpf->xsk.pool;
	u32 qid = bpf->xsk.queue_id;
	bool restart;
	int ret;

	if (pool && !IS_ALIGNED(xsk_pool_get_rx_frame_size(pool),
				LIBETH_RX_BUF_STRIDE)) {
		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
				       "%s: HW doesn't support frames sizes not aligned to %u (qid %u: %u)",
				       netdev_name(vport->netdev),
				       LIBETH_RX_BUF_STRIDE, qid,
				       xsk_pool_get_rx_frame_size(pool));
		return -EINVAL;
	}

	restart = idpf_xdp_enabled(vport) && netif_running(vport->netdev);
	if (!restart)
		goto pool;

	ret = idpf_qp_switch(vport, qid, false);
	if (ret) {
		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
				       "%s: failed to disable queue pair %u: %pe",
				       netdev_name(vport->netdev), qid,
				       ERR_PTR(ret));
		return ret;
	}

pool:
	ret = libeth_xsk_setup_pool(vport->netdev, qid, pool);
	if (ret) {
		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
				       "%s: failed to configure XSk pool for pair %u: %pe",
				       netdev_name(vport->netdev), qid,
				       ERR_PTR(ret));
		return ret;
	}

	if (!restart)
		return 0;

	ret = idpf_qp_switch(vport, qid, true);
	if (ret) {
		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
				       "%s: failed to enable queue pair %u: %pe",
				       netdev_name(vport->netdev), qid,
				       ERR_PTR(ret));
		goto err_dis;
	}

	return 0;

err_dis:
	libeth_xsk_setup_pool(vport->netdev, qid, false);

	return ret;
}

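/**
 * idpf_xsk_wakeup - ``ndo_xsk_wakeup`` implementation
 * @dev: network device
 * @qid: queue to wake up
 * @flags: ``XDP_WAKEUP_*`` flags, not used by the driver
 *
 * Kick the queue vector serving the given queue pair so that pending XSk
 * work gets processed.
 *
 * Return: 0 on success, -EBUSY if the control lock is held, -ENETDOWN if
 * the link is down, -ENXIO if the queue has no XSk Tx queue attached.
 */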
int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	const struct idpf_netdev_priv *np = netdev_priv(dev);
	const struct idpf_vport *vport = np->vport;
	struct idpf_q_vector *q_vector;

	if (unlikely(idpf_vport_ctrl_is_locked(dev)))
		return -EBUSY;

	if (unlikely(!vport->link_up))
		return -ENETDOWN;

	if (unlikely(!vport->num_xdp_txq))
		return -ENXIO;

	q_vector = idpf_find_rxq_vec(vport, qid);
	if (unlikely(!q_vector->xsksq))
		return -ENXIO;

	libeth_xsk_wakeup(&q_vector->csd, qid);

	return 0;
}
637