xref: /linux/drivers/net/ethernet/intel/i40e/i40e_xsk.c (revision d1bc532e99becf104635ed4da6fefa306f452321)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

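/**
 * i40e_alloc_rx_bi_zc - Allocate the zero-copy software ring
 * @rx_ring: Rx ring to allocate the xdp_buff pointer array for
 *
 * Allocates one xdp_buff pointer per descriptor in @rx_ring.
 *
 * Returns 0 on success, -ENOMEM on failure
 **/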
int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}

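/**
 * i40e_clear_rx_bi_zc - Zero out the zero-copy software ring
 * @rx_ring: Rx ring whose xdp_buff pointer array is cleared
 **/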
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

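/* Return the address of the xdp_buff pointer stored at @idx in the
 * zero-copy software ring.
 */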
static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool to associate with the ring
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable/disable an AF_XDP buffer pool for
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate buffer pool to/from
 *
 * This function enables or disables a buffer pool on a certain ring/qid.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	/* NB! xdp_prog will always be !NULL, because this path is
	 * only enabled by installing an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return I40E_XDP_REDIR;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		if (result == I40E_XDP_CONSUMED)
			goto out_failure;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	return result;
}

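/**
 * i40e_alloc_rx_buffers_zc - Allocate Rx buffers from the AF_XDP buffer pool
 * @rx_ring: Rx ring
 * @count: number of buffers to allocate
 *
 * Places buffers from the buffer pool on the Rx descriptor ring, starting
 * at next_to_use. A single call fills descriptors up to the end of the
 * ring at most; it does not wrap around.
 *
 * Returns true if all @count buffers were allocated, false otherwise
 **/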
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **xdp;
	u32 nb_buffs, i;
	dma_addr_t dma;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	xdp = i40e_rx_bi(rx_ring, ntu);

	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
	if (!nb_buffs)
		return false;

	i = nb_buffs;
	while (i--) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		xdp++;
	}

	ntu += nb_buffs;
	if (ntu == rx_ring->count) {
		rx_desc = I40E_RX_DESC(rx_ring, 0);
		xdp = i40e_rx_bi(rx_ring, 0);
		ntu = 0;
	}

	/* clear the status bits for the next_to_use descriptor */
	rx_desc->wb.qword1.status_error_len = 0;
	i40e_release_rx_desc(rx_ring, ntu);

	return count == nb_buffs;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		goto out;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

out:
	xsk_buff_free(xdp);
	return skb;
}

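/**
 * i40e_handle_xdp_result_zc - Act on the verdict from the XDP program
 * @rx_ring: Rx ring
 * @xdp_buff: buffer the XDP program was run on
 * @rx_desc: Rx descriptor the buffer was received on
 * @rx_packets: output, number of packets counted for this buffer (0 or 1)
 * @rx_bytes: output, number of bytes counted for this buffer
 * @size: packet length as reported by the Rx descriptor
 * @xdp_res: verdict from i40e_run_xdp_zc(), any of the I40E_XDP_* values
 **/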
static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
				      struct xdp_buff *xdp_buff,
				      union i40e_rx_desc *rx_desc,
				      unsigned int *rx_packets,
				      unsigned int *rx_bytes,
				      unsigned int size,
				      unsigned int xdp_res)
{
	struct sk_buff *skb;

	*rx_packets = 1;
	*rx_bytes = size;

	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
		return;

	if (xdp_res == I40E_XDP_CONSUMED) {
		xsk_buff_free(xdp_buff);
		return;
	}

	if (xdp_res == I40E_XDP_PASS) {
		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
		 * SBP is *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		if (eth_skb_pad(skb)) {
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		*rx_bytes = skb->len;
		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		return;
	}

	/* Should never get here, as all valid cases have been handled already.
	 */
	WARN_ON_ONCE(1);
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	u16 next_to_clean = rx_ring->next_to_clean;
	u16 count_mask = rx_ring->count - 1;
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		unsigned int rx_packets;
		unsigned int rx_bytes;
		struct xdp_buff *bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = *i40e_rx_bi(rx_ring, next_to_clean);
			xsk_buff_free(bi);
			next_to_clean = (next_to_clean + 1) & count_mask;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = *i40e_rx_bi(rx_ring, next_to_clean);
		xsk_buff_set_size(bi, size);
		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);

		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
					  &rx_bytes, size, xdp_res);
		total_rx_packets += rx_packets;
		total_rx_bytes += rx_bytes;
		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
		next_to_clean = (next_to_clean + 1) & count_mask;
	}

	rx_ring->next_to_clean = next_to_clean;
	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;

	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

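/**
 * i40e_xmit_pkt - Fill one Tx descriptor from an AF_XDP Tx descriptor
 * @xdp_ring: XDP Tx ring
 * @desc: AF_XDP descriptor holding the frame address and length
 * @total_bytes: accumulator increased by the length of the frame
 **/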
static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
						  0, desc->len, 0);

	*total_bytes += desc->len;
}

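/**
 * i40e_xmit_pkt_batch - Fill PKTS_PER_BATCH Tx descriptors in one go
 * @xdp_ring: XDP Tx ring
 * @desc: array of at least PKTS_PER_BATCH AF_XDP descriptors
 * @total_bytes: accumulator increased by the length of each frame
 *
 * Same as i40e_xmit_pkt(), but for a batch of PKTS_PER_BATCH frames
 * using an unrolled loop.
 **/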
static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
							  I40E_TX_DESC_CMD_EOP,
							  0, desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

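/**
 * i40e_fill_tx_hw_ring - Fill the Tx HW ring from AF_XDP descriptors
 * @xdp_ring: XDP Tx ring
 * @descs: AF_XDP descriptors to transmit
 * @nb_pkts: number of descriptors in @descs
 * @total_bytes: accumulator for the bytes of the whole batch
 *
 * Sends full batches of PKTS_PER_BATCH frames through the unrolled path
 * and the remainder one frame at a time.
 **/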
static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

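/* Set the RS (Report Status) bit on the last descriptor that was filled,
 * so that the hardware reports completion for it and all descriptors
 * before it.
 */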
static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}

/**
 * i40e_xmit_zc - Performs zero-copy AF_XDP Tx
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			     &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return nb_pkts < budget;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

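/**
 * i40e_xsk_clean_rx_ring - Clean the Rx ring on shutdown
 * @rx_ring: Rx ring
 *
 * Returns all xdp_buffs still outstanding between next_to_clean and
 * next_to_use back to the AF_XDP buffer pool.
 **/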
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 count_mask = rx_ring->count - 1;
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);

		xsk_buff_free(rx_bi);
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: Current VSI
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}