xref: /linux/drivers/net/ethernet/intel/ice/ice_xsk.c (revision 1f8d99de1d1b4b3764203ae02db57041475dab84)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include <linux/bpf_trace.h>
5 #include <net/xdp_sock_drv.h>
6 #include <net/xdp.h>
7 #include "ice.h"
8 #include "ice_base.h"
9 #include "ice_type.h"
10 #include "ice_xsk.h"
11 #include "ice_txrx.h"
12 #include "ice_txrx_lib.h"
13 #include "ice_lib.h"
14 
15 static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
16 {
17 	return &rx_ring->xdp_buf[idx];
18 }
19 
20 /**
21  * ice_qp_reset_stats - Resets all stats for rings of a given index
22  * @vsi: VSI that contains rings of interest
23  * @q_idx: ring index in array
24  */
25 static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
26 {
27 	memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
28 	       sizeof(vsi->rx_rings[q_idx]->rx_stats));
29 	memset(&vsi->tx_rings[q_idx]->stats, 0,
30 	       sizeof(vsi->tx_rings[q_idx]->stats));
31 	if (ice_is_xdp_ena_vsi(vsi))
32 		memset(&vsi->xdp_rings[q_idx]->stats, 0,
33 		       sizeof(vsi->xdp_rings[q_idx]->stats));
34 }
35 
36 /**
37  * ice_qp_clean_rings - Cleans all the rings of a given index
38  * @vsi: VSI that contains rings of interest
39  * @q_idx: ring index in array
40  */
41 static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
42 {
43 	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
44 	if (ice_is_xdp_ena_vsi(vsi))
45 		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
46 	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
47 }
48 
49 /**
50  * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
51  * @vsi: VSI that has netdev
52  * @q_vector: q_vector that has NAPI context
53  * @enable: true for enable, false for disable
54  */
55 static void
56 ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
57 		     bool enable)
58 {
59 	if (!vsi->netdev || !q_vector)
60 		return;
61 
62 	if (enable)
63 		napi_enable(&q_vector->napi);
64 	else
65 		napi_disable(&q_vector->napi);
66 }
67 
68 /**
69  * ice_qvec_dis_irq - Mask off queue interrupt generation on a given ring
70  * @vsi: the VSI that contains the queue vector being un-configured
71  * @rx_ring: Rx ring that will have its IRQ disabled
72  * @q_vector: queue vector
73  */
74 static void
75 ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
76 		 struct ice_q_vector *q_vector)
77 {
78 	struct ice_pf *pf = vsi->back;
79 	struct ice_hw *hw = &pf->hw;
80 	int base = vsi->base_vector;
81 	u16 reg;
82 	u32 val;
83 
84 	/* QINT_TQCTL is cleared in ice_vsi_stop_tx_ring(), so only
85 	 * QINT_RQCTL needs to be handled here
86 	 */
87 	reg = rx_ring->reg_idx;
88 	val = rd32(hw, QINT_RQCTL(reg));
89 	val &= ~QINT_RQCTL_CAUSE_ENA_M;
90 	wr32(hw, QINT_RQCTL(reg), val);
91 
92 	if (q_vector) {
93 		u16 v_idx = q_vector->v_idx;
94 
95 		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
96 		ice_flush(hw);
97 		synchronize_irq(pf->msix_entries[v_idx + base].vector);
98 	}
99 }
100 
101 /**
102  * ice_qvec_cfg_msix - Configure MSI-X for a given queue vector
103  * @vsi: the VSI that contains the queue vector
104  * @q_vector: queue vector
105  */
106 static void
107 ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
108 {
109 	u16 reg_idx = q_vector->reg_idx;
110 	struct ice_pf *pf = vsi->back;
111 	struct ice_hw *hw = &pf->hw;
112 	struct ice_tx_ring *tx_ring;
113 	struct ice_rx_ring *rx_ring;
114 
115 	ice_cfg_itr(hw, q_vector);
116 
117 	ice_for_each_tx_ring(tx_ring, q_vector->tx)
118 		ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
119 				      q_vector->tx.itr_idx);
120 
121 	ice_for_each_rx_ring(rx_ring, q_vector->rx)
122 		ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
123 				      q_vector->rx.itr_idx);
124 
125 	ice_flush(hw);
126 }
127 
128 /**
129  * ice_qvec_ena_irq - Enable IRQ for a given queue vector
130  * @vsi: the VSI that contains the queue vector
131  * @q_vector: queue vector
132  */
133 static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
134 {
135 	struct ice_pf *pf = vsi->back;
136 	struct ice_hw *hw = &pf->hw;
137 
138 	ice_irq_dynamic_ena(hw, vsi, q_vector);
139 
140 	ice_flush(hw);
141 }
142 
143 /**
144  * ice_qp_dis - Disables a queue pair
145  * @vsi: VSI of interest
146  * @q_idx: ring index in array
147  *
148  * Returns 0 on success, negative on failure.
149  */
150 static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
151 {
152 	struct ice_txq_meta txq_meta = { };
153 	struct ice_q_vector *q_vector;
154 	struct ice_tx_ring *tx_ring;
155 	struct ice_rx_ring *rx_ring;
156 	int timeout = 50;
157 	int err;
158 
159 	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
160 		return -EINVAL;
161 
162 	tx_ring = vsi->tx_rings[q_idx];
163 	rx_ring = vsi->rx_rings[q_idx];
164 	q_vector = rx_ring->q_vector;
165 
166 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
167 		timeout--;
168 		if (!timeout)
169 			return -EBUSY;
170 		usleep_range(1000, 2000);
171 	}
172 	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
173 
174 	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
175 
176 	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
177 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
178 	if (err)
179 		return err;
180 	if (ice_is_xdp_ena_vsi(vsi)) {
181 		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
182 
183 		memset(&txq_meta, 0, sizeof(txq_meta));
184 		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
185 		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
186 					   &txq_meta);
187 		if (err)
188 			return err;
189 	}
190 	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
191 	if (err)
192 		return err;
193 
194 	ice_qvec_toggle_napi(vsi, q_vector, false);
195 	ice_qp_clean_rings(vsi, q_idx);
196 	ice_qp_reset_stats(vsi, q_idx);
197 
198 	return 0;
199 }
200 
201 /**
202  * ice_qp_ena - Enables a queue pair
203  * @vsi: VSI of interest
204  * @q_idx: ring index in array
205  *
206  * Returns 0 on success, negative on failure.
207  */
208 static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
209 {
210 	struct ice_aqc_add_tx_qgrp *qg_buf;
211 	struct ice_q_vector *q_vector;
212 	struct ice_tx_ring *tx_ring;
213 	struct ice_rx_ring *rx_ring;
214 	u16 size;
215 	int err;
216 
217 	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
218 		return -EINVAL;
219 
220 	size = struct_size(qg_buf, txqs, 1);
221 	qg_buf = kzalloc(size, GFP_KERNEL);
222 	if (!qg_buf)
223 		return -ENOMEM;
224 
225 	qg_buf->num_txqs = 1;
226 
227 	tx_ring = vsi->tx_rings[q_idx];
228 	rx_ring = vsi->rx_rings[q_idx];
229 	q_vector = rx_ring->q_vector;
230 
231 	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
232 	if (err)
233 		goto free_buf;
234 
235 	if (ice_is_xdp_ena_vsi(vsi)) {
236 		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
237 
238 		memset(qg_buf, 0, size);
239 		qg_buf->num_txqs = 1;
240 		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
241 		if (err)
242 			goto free_buf;
243 		ice_set_ring_xdp(xdp_ring);
244 		xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
245 	}
246 
247 	err = ice_vsi_cfg_rxq(rx_ring);
248 	if (err)
249 		goto free_buf;
250 
251 	ice_qvec_cfg_msix(vsi, q_vector);
252 
253 	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
254 	if (err)
255 		goto free_buf;
256 
257 	clear_bit(ICE_CFG_BUSY, vsi->state);
258 	ice_qvec_toggle_napi(vsi, q_vector, true);
259 	ice_qvec_ena_irq(vsi, q_vector);
260 
261 	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
262 free_buf:
263 	kfree(qg_buf);
264 	return err;
265 }
266 
267 /**
268  * ice_xsk_pool_disable - disable a buffer pool region
269  * @vsi: Current VSI
270  * @qid: queue ID
271  *
272  * Returns 0 on success, negative on failure
273  */
274 static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
275 {
276 	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
277 
278 	if (!pool)
279 		return -EINVAL;
280 
281 	clear_bit(qid, vsi->af_xdp_zc_qps);
282 	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
283 
284 	return 0;
285 }
286 
287 /**
288  * ice_xsk_pool_enable - enable a buffer pool region
289  * @vsi: Current VSI
290  * @pool: pointer to a requested buffer pool region
291  * @qid: queue ID
292  *
293  * Returns 0 on success, negative on failure
294  */
295 static int
296 ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
297 {
298 	int err;
299 
300 	if (vsi->type != ICE_VSI_PF)
301 		return -EINVAL;
302 
303 	if (qid >= vsi->netdev->real_num_rx_queues ||
304 	    qid >= vsi->netdev->real_num_tx_queues)
305 		return -EINVAL;
306 
307 	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
308 			       ICE_RX_DMA_ATTR);
309 	if (err)
310 		return err;
311 
312 	set_bit(qid, vsi->af_xdp_zc_qps);
313 
314 	return 0;
315 }
316 
317 /**
318  * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
319  * @vsi: Current VSI
320  * @pool: buffer pool to enable/associate to a ring, NULL to disable
321  * @qid: queue ID
322  *
323  * Returns 0 on success, negative on failure
324  */
325 int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
326 {
327 	bool if_running, pool_present = !!pool;
328 	int ret = 0, pool_failure = 0;
329 
330 	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
331 
332 	if (if_running) {
333 		ret = ice_qp_dis(vsi, qid);
334 		if (ret) {
335 			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
336 			goto xsk_pool_if_up;
337 		}
338 	}
339 
340 	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
341 				      ice_xsk_pool_disable(vsi, qid);
342 
343 xsk_pool_if_up:
344 	if (if_running) {
345 		ret = ice_qp_ena(vsi, qid);
346 		if (!ret && pool_present)
347 			napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
348 		else if (ret)
349 			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
350 	}
351 
352 	if (pool_failure) {
353 		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
354 			   pool_present ? "en" : "dis", pool_failure);
355 		return pool_failure;
356 	}
357 
358 	return ret;
359 }
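
/* Illustration (not part of this file): ice_xsk_pool_setup() is
 * reached through the driver's ndo_bpf callback when an AF_XDP socket
 * binds or unbinds a buffer pool. A minimal sketch of that dispatch,
 * modeled on the handler in ice_main.c:
 *
 *	case XDP_SETUP_XSK_POOL:
 *		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
 *					  xdp->xsk.queue_id);
 *
 * where pool is NULL on unbind, which lands in ice_xsk_pool_disable().
 */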
360 
361 /**
362  * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
363  * @rx_ring: Rx ring
364  * @count: The number of buffers to allocate
365  *
366  * This function allocates a number of Rx buffers from the fill ring
367  * or the internal recycle mechanism and places them on the Rx ring.
368  *
369  * Returns true if all allocations were successful, false if any fail.
370  */
371 bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
372 {
373 	union ice_32b_rx_flex_desc *rx_desc;
374 	u16 ntu = rx_ring->next_to_use;
375 	struct xdp_buff **xdp;
376 	u32 nb_buffs, i;
377 	dma_addr_t dma;
378 
379 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
380 	xdp = ice_xdp_buf(rx_ring, ntu);
381 
382 	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
383 	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
384 	if (!nb_buffs)
385 		return false;
386 
387 	i = nb_buffs;
388 	while (i--) {
389 		dma = xsk_buff_xdp_get_dma(*xdp);
390 		rx_desc->read.pkt_addr = cpu_to_le64(dma);
391 		rx_desc->wb.status_error0 = 0;
392 
393 		rx_desc++;
394 		xdp++;
395 	}
396 
397 	ntu += nb_buffs;
398 	if (ntu == rx_ring->count)
399 		ntu = 0;
400 
401 	ice_release_rx_desc(rx_ring, ntu);
402 
403 	return count == nb_buffs;
404 }
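
/* Worked example (illustrative numbers): with count == 64,
 * next_to_use == 2020 and a 2048-entry ring, nb_buffs is first capped
 * to 28 so the batch never wraps past the end of the ring; the
 * function then returns false (64 != 28) and the caller is expected
 * to ask for the remainder on a later invocation.
 */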
405 
406 /**
407  * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
408  * @rx_ring: Rx ring
409  */
410 static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
411 {
412 	int ntc = rx_ring->next_to_clean + 1;
413 
414 	ntc = (ntc < rx_ring->count) ? ntc : 0;
415 	rx_ring->next_to_clean = ntc;
416 	prefetch(ICE_RX_DESC(rx_ring, ntc));
417 }
418 
419 /**
420  * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
421  * @rx_ring: Rx ring
422  * @xdp: Pointer to XDP buffer
423  *
424  * This function allocates a new skb from a zero-copy Rx buffer.
425  *
426  * Returns the skb on success, NULL on failure.
427  */
428 static struct sk_buff *
429 ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
430 {
431 	unsigned int totalsize = xdp->data_end - xdp->data_meta;
432 	unsigned int metasize = xdp->data - xdp->data_meta;
433 	struct sk_buff *skb;
434 
435 	net_prefetch(xdp->data_meta);
436 
437 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
438 			       GFP_ATOMIC | __GFP_NOWARN);
439 	if (unlikely(!skb))
440 		return NULL;
441 
442 	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
443 	       ALIGN(totalsize, sizeof(long)));
444 
445 	if (metasize) {
446 		skb_metadata_set(skb, metasize);
447 		__skb_pull(skb, metasize);
448 	}
449 
450 	xsk_buff_free(xdp);
451 	return skb;
452 }
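
/* The metadata consumed above is produced by an XDP program calling,
 * e.g. (sketch; struct my_meta is a hypothetical name):
 *
 *	bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct my_meta));
 *
 * which grows the headroom in front of xdp->data for program-private
 * data that travels with the frame into the skb metadata.
 */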
453 
454 /**
455  * ice_run_xdp_zc - Executes an XDP program in zero-copy path
456  * @rx_ring: Rx ring
457  * @xdp: xdp_buff used as input to the XDP program
458  * @xdp_prog: XDP program to run
459  * @xdp_ring: ring to be used for XDP_TX action
460  *
461  * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
462  */
463 static int
464 ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
465 	       struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
466 {
467 	int err, result = ICE_XDP_PASS;
468 	u32 act;
469 
470 	act = bpf_prog_run_xdp(xdp_prog, xdp);
471 
472 	if (likely(act == XDP_REDIRECT)) {
473 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
474 		if (err)
475 			goto out_failure;
476 		return ICE_XDP_REDIR;
477 	}
478 
479 	switch (act) {
480 	case XDP_PASS:
481 		break;
482 	case XDP_TX:
483 		result = ice_xmit_xdp_buff(xdp, xdp_ring);
484 		if (result == ICE_XDP_CONSUMED)
485 			goto out_failure;
486 		break;
487 	default:
488 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
489 		fallthrough;
490 	case XDP_ABORTED:
491 out_failure:
492 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
493 		fallthrough;
494 	case XDP_DROP:
495 		result = ICE_XDP_CONSUMED;
496 		break;
497 	}
498 
499 	return result;
500 }
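
/* A toy XDP program exercising the verdicts handled above could look
 * like this (BPF side, shown only for illustration; xsks_map is a
 * hypothetical BPF_MAP_TYPE_XSKMAP):
 *
 *	SEC("xdp")
 *	int xdp_sock_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
 *	}
 *
 * XDP_REDIRECT into an XSKMAP is the verdict the fast path above is
 * laid out for.
 */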
501 
502 /**
503  * ice_clean_rx_irq_zc - consumes packets from the hardware ring
504  * @rx_ring: AF_XDP Rx ring
505  * @budget: NAPI budget
506  *
507  * Returns the number of processed packets on success, or the remaining budget on failure.
508  */
509 int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
510 {
511 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
512 	struct ice_tx_ring *xdp_ring;
513 	unsigned int xdp_xmit = 0;
514 	struct bpf_prog *xdp_prog;
515 	bool failure = false;
516 
517 	/* The ZC path is enabled only when an XDP program is set,
518 	 * so xdp_prog cannot be NULL here
519 	 */
520 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
521 	xdp_ring = rx_ring->xdp_ring;
522 
523 	while (likely(total_rx_packets < (unsigned int)budget)) {
524 		union ice_32b_rx_flex_desc *rx_desc;
525 		unsigned int size, xdp_res = 0;
526 		struct xdp_buff *xdp;
527 		struct sk_buff *skb;
528 		u16 stat_err_bits;
529 		u16 vlan_tag = 0;
530 		u16 rx_ptype;
531 
532 		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
533 
534 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
535 		if (!ice_test_staterr(rx_desc, stat_err_bits))
536 			break;
537 
538 		/* This memory barrier is needed to keep us from reading
539 		 * any other fields out of the rx_desc until we have
540 		 * verified the descriptor has been written back.
541 		 */
542 		dma_rmb();
543 
544 		xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
545 
546 		size = le16_to_cpu(rx_desc->wb.pkt_len) &
547 				   ICE_RX_FLX_DESC_PKT_LEN_M;
548 		if (!size) {
549 			xdp->data = NULL;
550 			xdp->data_end = NULL;
551 			xdp->data_hard_start = NULL;
552 			xdp->data_meta = NULL;
553 			goto construct_skb;
554 		}
555 
556 		xsk_buff_set_size(xdp, size);
557 		xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
558 
559 		xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
560 		if (xdp_res) {
561 			if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
562 				xdp_xmit |= xdp_res;
563 			else
564 				xsk_buff_free(xdp);
565 
566 			total_rx_bytes += size;
567 			total_rx_packets++;
568 
569 			ice_bump_ntc(rx_ring);
570 			continue;
571 		}
572 construct_skb:
573 		/* XDP_PASS path */
574 		skb = ice_construct_skb_zc(rx_ring, xdp);
575 		if (!skb) {
576 			rx_ring->rx_stats.alloc_buf_failed++;
577 			break;
578 		}
579 
580 		ice_bump_ntc(rx_ring);
581 
582 		if (eth_skb_pad(skb)) {
583 			skb = NULL;
584 			continue;
585 		}
586 
587 		total_rx_bytes += skb->len;
588 		total_rx_packets++;
589 
590 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
591 		if (ice_test_staterr(rx_desc, stat_err_bits))
592 			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
593 
594 		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
595 				       ICE_RX_FLEX_DESC_PTYPE_M;
596 
597 		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
598 		ice_receive_skb(rx_ring, skb, vlan_tag);
599 	}
600 
601 	failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
602 
603 	ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
604 	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
605 
606 	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
607 		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
608 			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
609 		else
610 			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
611 
612 		return (int)total_rx_packets;
613 	}
614 
615 	return failure ? budget : (int)total_rx_packets;
616 }
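
/* Userspace counterpart (sketch, assuming the xsk.h helpers from
 * libbpf/libxdp): once the driver sets the Rx need_wakeup flag above,
 * the application must kick the kernel to get the fill ring serviced:
 *
 *	if (xsk_ring_prod__needs_wakeup(&fill_ring))
 *		recvfrom(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, NULL);
 */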
617 
618 /**
619  * ice_xmit_zc - Transmits frames from the AF_XDP Tx ring
620  * @xdp_ring: XDP Tx ring
621  * @budget: max number of frames to xmit
622  *
623  * Returns true if transmission is done (budget was not exhausted).
624  */
625 static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget)
626 {
627 	struct ice_tx_desc *tx_desc = NULL;
628 	bool work_done = true;
629 	struct xdp_desc desc;
630 	dma_addr_t dma;
631 
632 	while (likely(budget-- > 0)) {
633 		struct ice_tx_buf *tx_buf;
634 
635 		if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
636 			xdp_ring->tx_stats.tx_busy++;
637 			work_done = false;
638 			break;
639 		}
640 
641 		tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
642 
643 		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
644 			break;
645 
646 		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
647 		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
648 						 desc.len);
649 
650 		tx_buf->bytecount = desc.len;
651 
652 		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
653 		tx_desc->buf_addr = cpu_to_le64(dma);
654 		tx_desc->cmd_type_offset_bsz =
655 			ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
656 
657 		xdp_ring->next_to_use++;
658 		if (xdp_ring->next_to_use == xdp_ring->count)
659 			xdp_ring->next_to_use = 0;
660 	}
661 
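	/* Bump the HW tail once for the whole batch and release the
	 * descriptors peeked from the XSK pool.
	 */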
662 	if (tx_desc) {
663 		ice_xdp_ring_update_tail(xdp_ring);
664 		xsk_tx_release(xdp_ring->xsk_pool);
665 	}
666 
667 	return budget > 0 && work_done;
668 }
669 
670 /**
671  * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
672  * @xdp_ring: XDP Tx ring
673  * @tx_buf: Tx buffer to clean
674  */
675 static void
676 ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
677 {
678 	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
679 	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
680 			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
681 	dma_unmap_len_set(tx_buf, len, 0);
682 }
683 
684 /**
685  * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
686  * @xdp_ring: XDP Tx ring
687  * @budget: NAPI budget
688  *
689  * Returns true if cleanup/transmission is done.
690  */
691 bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
692 {
693 	int total_packets = 0, total_bytes = 0;
694 	s16 ntc = xdp_ring->next_to_clean;
695 	struct ice_tx_desc *tx_desc;
696 	struct ice_tx_buf *tx_buf;
697 	u32 xsk_frames = 0;
698 	bool xmit_done;
699 
700 	tx_desc = ICE_TX_DESC(xdp_ring, ntc);
701 	tx_buf = &xdp_ring->tx_buf[ntc];
702 	ntc -= xdp_ring->count;
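	/* ntc is biased by -count so the wrap check in the loop below
	 * reduces to a simple test against zero.
	 */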
703 
704 	do {
705 		if (!(tx_desc->cmd_type_offset_bsz &
706 		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
707 			break;
708 
709 		total_bytes += tx_buf->bytecount;
710 		total_packets++;
711 
712 		if (tx_buf->raw_buf) {
713 			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
714 			tx_buf->raw_buf = NULL;
715 		} else {
716 			xsk_frames++;
717 		}
718 
719 		tx_desc->cmd_type_offset_bsz = 0;
720 		tx_buf++;
721 		tx_desc++;
722 		ntc++;
723 
724 		if (unlikely(!ntc)) {
725 			ntc -= xdp_ring->count;
726 			tx_buf = xdp_ring->tx_buf;
727 			tx_desc = ICE_TX_DESC(xdp_ring, 0);
728 		}
729 
730 		prefetch(tx_desc);
731 
732 	} while (likely(--budget));
733 
734 	ntc += xdp_ring->count;
735 	xdp_ring->next_to_clean = ntc;
736 
737 	if (xsk_frames)
738 		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
739 
740 	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
741 		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
742 
743 	ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
744 	xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
745 
746 	return budget > 0 && xmit_done;
747 }
748 
749 /**
750  * ice_xsk_wakeup - Implements ndo_xsk_wakeup
751  * @netdev: net_device
752  * @queue_id: queue to wake up
753  * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
754  *
755  * Returns negative on error, zero otherwise.
756  */
757 int
758 ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
759 	       u32 __always_unused flags)
760 {
761 	struct ice_netdev_priv *np = netdev_priv(netdev);
762 	struct ice_q_vector *q_vector;
763 	struct ice_vsi *vsi = np->vsi;
764 	struct ice_tx_ring *ring;
765 
766 	if (test_bit(ICE_DOWN, vsi->state))
767 		return -ENETDOWN;
768 
769 	if (!ice_is_xdp_ena_vsi(vsi))
770 		return -ENXIO;
771 
772 	if (queue_id >= vsi->num_txq)
773 		return -ENXIO;
774 
775 	if (!vsi->xdp_rings[queue_id]->xsk_pool)
776 		return -ENXIO;
777 
778 	ring = vsi->xdp_rings[queue_id];
779 
780 	/* The idea here is that if NAPI is running, mark a miss so
781 	 * that it will run again. If not, trigger an interrupt and
782 	 * schedule the NAPI from interrupt context. If NAPI were
783 	 * scheduled here directly, the interrupt affinity would not
784 	 * be honored.
785 	 */
786 	q_vector = ring->q_vector;
787 	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
788 		ice_trigger_sw_intr(&vsi->back->hw, q_vector);
789 
790 	return 0;
791 }
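
/* Userspace side (sketch, assuming the xsk.h helpers): this callback
 * is typically entered via a sendto() on the AF_XDP socket once
 * descriptors have been placed on the Tx ring:
 *
 *	if (xsk_ring_prod__needs_wakeup(&tx_ring))
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 */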
792 
793 /**
794  * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
795  * @vsi: VSI to be checked
796  *
797  * Returns true if any of the Rx rings has an AF_XDP buff pool attached
798  */
799 bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
800 {
801 	int i;
802 
803 	ice_for_each_rxq(vsi, i) {
804 		if (xsk_get_pool_from_qid(vsi->netdev, i))
805 			return true;
806 	}
807 
808 	return false;
809 }
810 
811 /**
812  * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
813  * @rx_ring: ring to be cleaned
814  */
815 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
816 {
817 	u16 count_mask = rx_ring->count - 1;
818 	u16 ntc = rx_ring->next_to_clean;
819 	u16 ntu = rx_ring->next_to_use;
820 
821 	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
822 		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
823 
824 		xsk_buff_free(xdp);
825 	}
826 }
827 
828 /**
829  * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
830  * @xdp_ring: XDP Tx ring
831  */
832 void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
833 {
834 	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
835 	u32 xsk_frames = 0;
836 
837 	while (ntc != ntu) {
838 		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
839 
840 		if (tx_buf->raw_buf)
841 			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
842 		else
843 			xsk_frames++;
844 
845 		tx_buf->raw_buf = NULL;
846 
847 		ntc++;
848 		if (ntc >= xdp_ring->count)
849 			ntc = 0;
850 	}
851 
852 	if (xsk_frames)
853 		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
854 }
855