xref: /linux/drivers/net/ethernet/intel/i40e/i40e_xsk.c (revision be1222b585fdc410b8c1dbcc57dd03a00f04eff5)
10a714186SBjörn Töpel // SPDX-License-Identifier: GPL-2.0
20a714186SBjörn Töpel /* Copyright(c) 2018 Intel Corporation. */
30a714186SBjörn Töpel 
40a714186SBjörn Töpel #include <linux/bpf_trace.h>
5a71506a4SMagnus Karlsson #include <net/xdp_sock_drv.h>
60a714186SBjörn Töpel #include <net/xdp.h>
70a714186SBjörn Töpel 
80a714186SBjörn Töpel #include "i40e.h"
90a714186SBjörn Töpel #include "i40e_txrx_common.h"
100a714186SBjörn Töpel #include "i40e_xsk.h"
110a714186SBjörn Töpel 
12*be1222b5SBjörn Töpel int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
13e1675f97SBjörn Töpel {
14*be1222b5SBjörn Töpel 	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
15*be1222b5SBjörn Töpel 
16*be1222b5SBjörn Töpel 	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
17*be1222b5SBjörn Töpel 	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
18*be1222b5SBjörn Töpel }
19*be1222b5SBjörn Töpel 
20*be1222b5SBjörn Töpel void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
21*be1222b5SBjörn Töpel {
22*be1222b5SBjörn Töpel 	memset(rx_ring->rx_bi_zc, 0,
23*be1222b5SBjörn Töpel 	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
24*be1222b5SBjörn Töpel }
25*be1222b5SBjörn Töpel 
26*be1222b5SBjörn Töpel static struct i40e_rx_buffer_zc *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
27*be1222b5SBjörn Töpel {
28*be1222b5SBjörn Töpel 	return &rx_ring->rx_bi_zc[idx];
29e1675f97SBjörn Töpel }
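
/* Illustrative usage sketch (not part of this file): the helpers above
 * are expected to be paired by the driver's Rx ring setup/teardown
 * paths, with i40e_rx_bi() used for all subsequent lookups:
 *
 *	if (i40e_alloc_rx_bi_zc(rx_ring))
 *		return -ENOMEM;
 *	...
 *	bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
 */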
30e1675f97SBjörn Töpel 
310a714186SBjörn Töpel /**
320a714186SBjörn Töpel  * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
330a714186SBjörn Töpel  * @vsi: Current VSI
340a714186SBjörn Töpel  * @umem: UMEM to DMA map
350a714186SBjörn Töpel  *
360a714186SBjörn Töpel  * Returns 0 on success, <0 on failure
370a714186SBjörn Töpel  **/
380a714186SBjörn Töpel static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
390a714186SBjörn Töpel {
400a714186SBjörn Töpel 	struct i40e_pf *pf = vsi->back;
410a714186SBjörn Töpel 	struct device *dev;
420a714186SBjörn Töpel 	unsigned int i, j;
430a714186SBjörn Töpel 	dma_addr_t dma;
440a714186SBjörn Töpel 
450a714186SBjörn Töpel 	dev = &pf->pdev->dev;
460a714186SBjörn Töpel 	for (i = 0; i < umem->npgs; i++) {
470a714186SBjörn Töpel 		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
480a714186SBjörn Töpel 					 DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
490a714186SBjörn Töpel 		if (dma_mapping_error(dev, dma))
500a714186SBjörn Töpel 			goto out_unmap;
510a714186SBjörn Töpel 
520a714186SBjörn Töpel 		umem->pages[i].dma = dma;
530a714186SBjörn Töpel 	}
540a714186SBjörn Töpel 
550a714186SBjörn Töpel 	return 0;
560a714186SBjörn Töpel 
570a714186SBjörn Töpel out_unmap:
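	/* Unwind only the pages mapped so far; note that the loop below
	 * must index with j, the unwind counter, not i.
	 */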
580a714186SBjörn Töpel 	for (j = 0; j < i; j++) {
590a714186SBjörn Töpel 		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
600a714186SBjörn Töpel 				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
610a714186SBjörn Töpel 		umem->pages[j].dma = 0;
620a714186SBjörn Töpel 	}
630a714186SBjörn Töpel 
640a714186SBjörn Töpel 	return -ENOMEM;
650a714186SBjörn Töpel }
660a714186SBjörn Töpel 
670a714186SBjörn Töpel /**
680a714186SBjörn Töpel  * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev
690a714186SBjörn Töpel  * @vsi: Current VSI
700a714186SBjörn Töpel  * @umem: UMEM to DMA unmap
710a714186SBjörn Töpel  **/
720a714186SBjörn Töpel static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
730a714186SBjörn Töpel {
740a714186SBjörn Töpel 	struct i40e_pf *pf = vsi->back;
750a714186SBjörn Töpel 	struct device *dev;
760a714186SBjörn Töpel 	unsigned int i;
770a714186SBjörn Töpel 
780a714186SBjörn Töpel 	dev = &pf->pdev->dev;
790a714186SBjörn Töpel 
800a714186SBjörn Töpel 	for (i = 0; i < umem->npgs; i++) {
810a714186SBjörn Töpel 		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
820a714186SBjörn Töpel 				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
830a714186SBjörn Töpel 
840a714186SBjörn Töpel 		umem->pages[i].dma = 0;
850a714186SBjörn Töpel 	}
860a714186SBjörn Töpel }
870a714186SBjörn Töpel 
880a714186SBjörn Töpel /**
89529eb362SJan Sokolowski  * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
900a714186SBjörn Töpel  * @vsi: Current VSI
910a714186SBjörn Töpel  * @umem: UMEM
920a714186SBjörn Töpel  * @qid: Rx ring to associate UMEM to
930a714186SBjörn Töpel  *
940a714186SBjörn Töpel  * Returns 0 on success, <0 on failure
950a714186SBjörn Töpel  **/
960a714186SBjörn Töpel static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
970a714186SBjörn Töpel 				u16 qid)
980a714186SBjörn Töpel {
99f3fef2b6SJan Sokolowski 	struct net_device *netdev = vsi->netdev;
100411dc16fSBjörn Töpel 	struct xdp_umem_fq_reuse *reuseq;
1010a714186SBjörn Töpel 	bool if_running;
1020a714186SBjörn Töpel 	int err;
1030a714186SBjörn Töpel 
1040a714186SBjörn Töpel 	if (vsi->type != I40E_VSI_MAIN)
1050a714186SBjörn Töpel 		return -EINVAL;
1060a714186SBjörn Töpel 
1070a714186SBjörn Töpel 	if (qid >= vsi->num_queue_pairs)
1080a714186SBjörn Töpel 		return -EINVAL;
1090a714186SBjörn Töpel 
110f3fef2b6SJan Sokolowski 	if (qid >= netdev->real_num_rx_queues ||
111f3fef2b6SJan Sokolowski 	    qid >= netdev->real_num_tx_queues)
1120a714186SBjörn Töpel 		return -EINVAL;
1130a714186SBjörn Töpel 
114411dc16fSBjörn Töpel 	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
115411dc16fSBjörn Töpel 	if (!reuseq)
116411dc16fSBjörn Töpel 		return -ENOMEM;
117411dc16fSBjörn Töpel 
118411dc16fSBjörn Töpel 	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
119411dc16fSBjörn Töpel 
1200a714186SBjörn Töpel 	err = i40e_xsk_umem_dma_map(vsi, umem);
1210a714186SBjörn Töpel 	if (err)
1220a714186SBjörn Töpel 		return err;
1230a714186SBjörn Töpel 
12444ddd4f1SBjörn Töpel 	set_bit(qid, vsi->af_xdp_zc_qps);
12544ddd4f1SBjörn Töpel 
1260a714186SBjörn Töpel 	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
1270a714186SBjörn Töpel 
1280a714186SBjörn Töpel 	if (if_running) {
1290a714186SBjörn Töpel 		err = i40e_queue_pair_disable(vsi, qid);
1300a714186SBjörn Töpel 		if (err)
1310a714186SBjörn Töpel 			return err;
1320a714186SBjörn Töpel 
1330a714186SBjörn Töpel 		err = i40e_queue_pair_enable(vsi, qid);
1340a714186SBjörn Töpel 		if (err)
1350a714186SBjörn Töpel 			return err;
13614ffeb52SMagnus Karlsson 
13714ffeb52SMagnus Karlsson 		/* Kick start the NAPI context so that receiving will start */
1389116e5e2SMagnus Karlsson 		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
13914ffeb52SMagnus Karlsson 		if (err)
14014ffeb52SMagnus Karlsson 			return err;
1410a714186SBjörn Töpel 	}
1420a714186SBjörn Töpel 
1430a714186SBjörn Töpel 	return 0;
1440a714186SBjörn Töpel }
1450a714186SBjörn Töpel 
1460a714186SBjörn Töpel /**
147529eb362SJan Sokolowski  * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
1480a714186SBjörn Töpel  * @vsi: Current VSI
1490a714186SBjörn Töpel  * @qid: Rx ring to disassociate UMEM from
1500a714186SBjörn Töpel  *
1510a714186SBjörn Töpel  * Returns 0 on success, <0 on failure
1520a714186SBjörn Töpel  **/
1530a714186SBjörn Töpel static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
1540a714186SBjörn Töpel {
155f3fef2b6SJan Sokolowski 	struct net_device *netdev = vsi->netdev;
156f3fef2b6SJan Sokolowski 	struct xdp_umem *umem;
1570a714186SBjörn Töpel 	bool if_running;
1580a714186SBjörn Töpel 	int err;
1590a714186SBjörn Töpel 
160f3fef2b6SJan Sokolowski 	umem = xdp_get_umem_from_qid(netdev, qid);
161f3fef2b6SJan Sokolowski 	if (!umem)
1620a714186SBjörn Töpel 		return -EINVAL;
1630a714186SBjörn Töpel 
1640a714186SBjörn Töpel 	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
1650a714186SBjörn Töpel 
1660a714186SBjörn Töpel 	if (if_running) {
1670a714186SBjörn Töpel 		err = i40e_queue_pair_disable(vsi, qid);
1680a714186SBjörn Töpel 		if (err)
1690a714186SBjörn Töpel 			return err;
1700a714186SBjörn Töpel 	}
1710a714186SBjörn Töpel 
17244ddd4f1SBjörn Töpel 	clear_bit(qid, vsi->af_xdp_zc_qps);
173f3fef2b6SJan Sokolowski 	i40e_xsk_umem_dma_unmap(vsi, umem);
1740a714186SBjörn Töpel 
1750a714186SBjörn Töpel 	if (if_running) {
1760a714186SBjörn Töpel 		err = i40e_queue_pair_enable(vsi, qid);
1770a714186SBjörn Töpel 		if (err)
1780a714186SBjörn Töpel 			return err;
1790a714186SBjörn Töpel 	}
1800a714186SBjörn Töpel 
1810a714186SBjörn Töpel 	return 0;
1820a714186SBjörn Töpel }
1830a714186SBjörn Töpel 
1840a714186SBjörn Töpel /**
185529eb362SJan Sokolowski  * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
1860a714186SBjörn Töpel  * @vsi: Current VSI
1870a714186SBjörn Töpel  * @umem: UMEM to enable/associate to a ring, or NULL to disable
1880a714186SBjörn Töpel  * @qid: Rx ring to (dis)associate UMEM to/from
1890a714186SBjörn Töpel  *
190529eb362SJan Sokolowski  * This function enables or disables a UMEM to a certain ring.
1910a714186SBjörn Töpel  *
1920a714186SBjörn Töpel  * Returns 0 on success, <0 on failure
1930a714186SBjörn Töpel  **/
1940a714186SBjörn Töpel int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
1950a714186SBjörn Töpel 			u16 qid)
1960a714186SBjörn Töpel {
1970a714186SBjörn Töpel 	return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
1980a714186SBjörn Töpel 		i40e_xsk_umem_disable(vsi, qid);
1990a714186SBjörn Töpel }
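
/* A minimal sketch of how this setup hook is reached (assuming the
 * XDP_SETUP_XSK_UMEM command of the driver's ndo_bpf callback in
 * i40e_main.c):
 *
 *	case XDP_SETUP_XSK_UMEM:
 *		return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
 *					   xdp->xsk.queue_id);
 */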
2000a714186SBjörn Töpel 
2010a714186SBjörn Töpel /**
2020a714186SBjörn Töpel  * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
2030a714186SBjörn Töpel  * @rx_ring: Rx ring
2040a714186SBjörn Töpel  * @xdp: xdp_buff used as input to the XDP program
2050a714186SBjörn Töpel  *
206529eb362SJan Sokolowski  * This function runs the XDP program on the xdp_buff and returns the verdict.
2070a714186SBjörn Töpel  *
2080a714186SBjörn Töpel  * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
2090a714186SBjörn Töpel  **/
2100a714186SBjörn Töpel static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
2110a714186SBjörn Töpel {
2122f86c806SKevin Laatz 	struct xdp_umem *umem = rx_ring->xsk_umem;
2130a714186SBjörn Töpel 	int err, result = I40E_XDP_PASS;
2140a714186SBjörn Töpel 	struct i40e_ring *xdp_ring;
2150a714186SBjörn Töpel 	struct bpf_prog *xdp_prog;
216168dfc3aSCiara Loftus 	u64 offset;
2170a714186SBjörn Töpel 	u32 act;
2180a714186SBjörn Töpel 
2190a714186SBjörn Töpel 	rcu_read_lock();
2200a714186SBjörn Töpel 	/* NB! xdp_prog will always be !NULL, due to the fact that
2210a714186SBjörn Töpel 	 * this path is enabled by setting an XDP program.
2220a714186SBjörn Töpel 	 */
2230a714186SBjörn Töpel 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2240a714186SBjörn Töpel 	act = bpf_prog_run_xdp(xdp_prog, xdp);
225168dfc3aSCiara Loftus 	offset = xdp->data - xdp->data_hard_start;
2262f86c806SKevin Laatz 
2272f86c806SKevin Laatz 	xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
2282f86c806SKevin Laatz 
2290a714186SBjörn Töpel 	switch (act) {
2300a714186SBjörn Töpel 	case XDP_PASS:
2310a714186SBjörn Töpel 		break;
2320a714186SBjörn Töpel 	case XDP_TX:
2330a714186SBjörn Töpel 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2340a714186SBjörn Töpel 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2350a714186SBjörn Töpel 		break;
2360a714186SBjörn Töpel 	case XDP_REDIRECT:
2370a714186SBjörn Töpel 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2380a714186SBjörn Töpel 		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
2390a714186SBjörn Töpel 		break;
2400a714186SBjörn Töpel 	default:
2410a714186SBjörn Töpel 		bpf_warn_invalid_xdp_action(act);
242514af5f0SGustavo A. R. Silva 		/* fall through */
2430a714186SBjörn Töpel 	case XDP_ABORTED:
2440a714186SBjörn Töpel 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2450a714186SBjörn Töpel 		/* fallthrough -- handle aborts by dropping packet */
2460a714186SBjörn Töpel 	case XDP_DROP:
2470a714186SBjörn Töpel 		result = I40E_XDP_CONSUMED;
2480a714186SBjörn Töpel 		break;
2490a714186SBjörn Töpel 	}
2500a714186SBjörn Töpel 	rcu_read_unlock();
2510a714186SBjörn Töpel 	return result;
2520a714186SBjörn Töpel }
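
/* For context: XDP_REDIRECT is the verdict an AF_XDP application's
 * program normally returns. An illustrative BPF-C program (assuming a
 * user-created XSKMAP named xsks_map) that steers each queue's traffic
 * to the socket bound to that queue:
 *
 *	SEC("xdp")
 *	int xsk_redirect(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
 *	}
 */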
2530a714186SBjörn Töpel 
2540a714186SBjörn Töpel /**
255*be1222b5SBjörn Töpel  * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer_zc
2560a714186SBjörn Töpel  * @rx_ring: Rx ring
2570a714186SBjörn Töpel  * @bi: Rx buffer to populate
2580a714186SBjörn Töpel  *
2590a714186SBjörn Töpel  * This function allocates an Rx buffer. The buffer can come from the
2600a714186SBjörn Töpel  * fill queue, or via the recycle queue (next_to_alloc).
2610a714186SBjörn Töpel  *
2620a714186SBjörn Töpel  * Returns true for a successful allocation, false otherwise
2630a714186SBjörn Töpel  **/
2640a714186SBjörn Töpel static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
265*be1222b5SBjörn Töpel 				 struct i40e_rx_buffer_zc *bi)
2660a714186SBjörn Töpel {
2670a714186SBjörn Töpel 	struct xdp_umem *umem = rx_ring->xsk_umem;
2680a714186SBjörn Töpel 	void *addr = bi->addr;
2690a714186SBjörn Töpel 	u64 handle, hr;
2700a714186SBjörn Töpel 
2710a714186SBjörn Töpel 	if (addr) {
2720a714186SBjörn Töpel 		rx_ring->rx_stats.page_reuse_count++;
2730a714186SBjörn Töpel 		return true;
2740a714186SBjörn Töpel 	}
2750a714186SBjörn Töpel 
2760a714186SBjörn Töpel 	if (!xsk_umem_peek_addr(umem, &handle)) {
2770a714186SBjörn Töpel 		rx_ring->rx_stats.alloc_page_failed++;
2780a714186SBjörn Töpel 		return false;
2790a714186SBjörn Töpel 	}
2800a714186SBjörn Töpel 
2810a714186SBjörn Töpel 	hr = umem->headroom + XDP_PACKET_HEADROOM;
2820a714186SBjörn Töpel 
2830a714186SBjörn Töpel 	bi->dma = xdp_umem_get_dma(umem, handle);
2840a714186SBjörn Töpel 	bi->dma += hr;
2850a714186SBjörn Töpel 
2860a714186SBjörn Töpel 	bi->addr = xdp_umem_get_data(umem, handle);
2870a714186SBjörn Töpel 	bi->addr += hr;
2880a714186SBjörn Töpel 
2894c5d9a7fSKevin Laatz 	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
2900a714186SBjörn Töpel 
291f8509aa0SMagnus Karlsson 	xsk_umem_release_addr(umem);
2920a714186SBjörn Töpel 	return true;
2930a714186SBjörn Töpel }
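
/* Worked example (illustrative numbers): with umem->headroom == 0 and
 * XDP_PACKET_HEADROOM == 256, a handle whose chunk starts at offset
 * 0x1000 in the UMEM yields bi->dma and bi->addr at 0x1100, reserving
 * 256 bytes that an XDP program may claim via bpf_xdp_adjust_head().
 */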
2940a714186SBjörn Töpel 
2950a714186SBjörn Töpel /**
296*be1222b5SBjörn Töpel  * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer_zc
2970a714186SBjörn Töpel  * @rx_ring: Rx ring
298411dc16fSBjörn Töpel  * @bi: Rx buffer to populate
2990a714186SBjörn Töpel  *
300411dc16fSBjörn Töpel  * This function allocates an Rx buffer. The buffer can come from the
301411dc16fSBjörn Töpel  * fill queue, or via the reuse queue.
3020a714186SBjörn Töpel  *
3030a714186SBjörn Töpel  * Returns true for a successful allocation, false otherwise
3040a714186SBjörn Töpel  **/
305411dc16fSBjörn Töpel static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
306*be1222b5SBjörn Töpel 				      struct i40e_rx_buffer_zc *bi)
307411dc16fSBjörn Töpel {
308411dc16fSBjörn Töpel 	struct xdp_umem *umem = rx_ring->xsk_umem;
309411dc16fSBjörn Töpel 	u64 handle, hr;
310411dc16fSBjörn Töpel 
311411dc16fSBjörn Töpel 	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
312411dc16fSBjörn Töpel 		rx_ring->rx_stats.alloc_page_failed++;
313411dc16fSBjörn Töpel 		return false;
314411dc16fSBjörn Töpel 	}
315411dc16fSBjörn Töpel 
316411dc16fSBjörn Töpel 	handle &= rx_ring->xsk_umem->chunk_mask;
317411dc16fSBjörn Töpel 
318411dc16fSBjörn Töpel 	hr = umem->headroom + XDP_PACKET_HEADROOM;
319411dc16fSBjörn Töpel 
320411dc16fSBjörn Töpel 	bi->dma = xdp_umem_get_dma(umem, handle);
321411dc16fSBjörn Töpel 	bi->dma += hr;
322411dc16fSBjörn Töpel 
323411dc16fSBjörn Töpel 	bi->addr = xdp_umem_get_data(umem, handle);
324411dc16fSBjörn Töpel 	bi->addr += hr;
325411dc16fSBjörn Töpel 
3264c5d9a7fSKevin Laatz 	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
327411dc16fSBjörn Töpel 
328f8509aa0SMagnus Karlsson 	xsk_umem_release_addr_rq(umem);
329411dc16fSBjörn Töpel 	return true;
330411dc16fSBjörn Töpel }
331411dc16fSBjörn Töpel 
332411dc16fSBjörn Töpel static __always_inline bool
333411dc16fSBjörn Töpel __i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
334411dc16fSBjörn Töpel 			   bool alloc(struct i40e_ring *rx_ring,
335*be1222b5SBjörn Töpel 				      struct i40e_rx_buffer_zc *bi))
3360a714186SBjörn Töpel {
3370a714186SBjörn Töpel 	u16 ntu = rx_ring->next_to_use;
3380a714186SBjörn Töpel 	union i40e_rx_desc *rx_desc;
339*be1222b5SBjörn Töpel 	struct i40e_rx_buffer_zc *bi;
3400a714186SBjörn Töpel 	bool ok = true;
3410a714186SBjörn Töpel 
3420a714186SBjörn Töpel 	rx_desc = I40E_RX_DESC(rx_ring, ntu);
343e1675f97SBjörn Töpel 	bi = i40e_rx_bi(rx_ring, ntu);
3440a714186SBjörn Töpel 	do {
345411dc16fSBjörn Töpel 		if (!alloc(rx_ring, bi)) {
3460a714186SBjörn Töpel 			ok = false;
3470a714186SBjörn Töpel 			goto no_buffers;
3480a714186SBjörn Töpel 		}
3490a714186SBjörn Töpel 
3500a714186SBjörn Töpel 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,
3510a714186SBjörn Töpel 						 rx_ring->rx_buf_len,
3520a714186SBjörn Töpel 						 DMA_BIDIRECTIONAL);
3530a714186SBjörn Töpel 
3540a714186SBjörn Töpel 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
3550a714186SBjörn Töpel 
3560a714186SBjörn Töpel 		rx_desc++;
3570a714186SBjörn Töpel 		bi++;
3580a714186SBjörn Töpel 		ntu++;
3590a714186SBjörn Töpel 
3600a714186SBjörn Töpel 		if (unlikely(ntu == rx_ring->count)) {
3610a714186SBjörn Töpel 			rx_desc = I40E_RX_DESC(rx_ring, 0);
362e1675f97SBjörn Töpel 			bi = i40e_rx_bi(rx_ring, 0);
3630a714186SBjörn Töpel 			ntu = 0;
3640a714186SBjörn Töpel 		}
3650a714186SBjörn Töpel 
3660a714186SBjörn Töpel 		rx_desc->wb.qword1.status_error_len = 0;
3670a714186SBjörn Töpel 		count--;
3680a714186SBjörn Töpel 	} while (count);
3690a714186SBjörn Töpel 
3700a714186SBjörn Töpel no_buffers:
3710a714186SBjörn Töpel 	if (rx_ring->next_to_use != ntu)
3720a714186SBjörn Töpel 		i40e_release_rx_desc(rx_ring, ntu);
3730a714186SBjörn Töpel 
3740a714186SBjörn Töpel 	return ok;
3750a714186SBjörn Töpel }
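
/* Design note: __i40e_alloc_rx_buffers_zc() is __always_inline and each
 * caller passes a compile-time-constant alloc() function, so the two
 * wrappers below specialize into direct calls -- no indirect branch
 * (and no retpoline cost) in the buffer allocation hot path.
 */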
3760a714186SBjörn Töpel 
3770a714186SBjörn Töpel /**
378411dc16fSBjörn Töpel  * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
379411dc16fSBjörn Töpel  * @rx_ring: Rx ring
380411dc16fSBjörn Töpel  * @count: The number of buffers to allocate
381411dc16fSBjörn Töpel  *
382411dc16fSBjörn Töpel  * This function allocates a number of Rx buffers from the reuse queue
383411dc16fSBjörn Töpel  * or fill ring and places them on the Rx ring.
384411dc16fSBjörn Töpel  *
385411dc16fSBjörn Töpel  * Returns true for a successful allocation, false otherwise
386411dc16fSBjörn Töpel  **/
387411dc16fSBjörn Töpel bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
388411dc16fSBjörn Töpel {
389411dc16fSBjörn Töpel 	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
390411dc16fSBjörn Töpel 					  i40e_alloc_buffer_slow_zc);
391411dc16fSBjörn Töpel }
392411dc16fSBjörn Töpel 
393411dc16fSBjörn Töpel /**
394411dc16fSBjörn Töpel  * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
395411dc16fSBjörn Töpel  * @rx_ring: Rx ring
396411dc16fSBjörn Töpel  * @count: The number of buffers to allocate
397411dc16fSBjörn Töpel  *
398411dc16fSBjörn Töpel  * This function allocates a number of Rx buffers from the fill ring
399411dc16fSBjörn Töpel  * or the internal recycle mechanism and places them on the Rx ring.
400411dc16fSBjörn Töpel  *
401411dc16fSBjörn Töpel  * Returns true for a successful allocation, false otherwise
402411dc16fSBjörn Töpel  **/
403411dc16fSBjörn Töpel static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
404411dc16fSBjörn Töpel {
405411dc16fSBjörn Töpel 	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
406411dc16fSBjörn Töpel 					  i40e_alloc_buffer_zc);
407411dc16fSBjörn Töpel }
408411dc16fSBjörn Töpel 
409411dc16fSBjörn Töpel /**
4100a714186SBjörn Töpel  * i40e_get_rx_buffer_zc - Return the current Rx buffer
4110a714186SBjörn Töpel  * @rx_ring: Rx ring
4120a714186SBjörn Töpel  * @size: The size of the rx buffer (read from descriptor)
4130a714186SBjörn Töpel  *
4140a714186SBjörn Töpel  * This function returns the current, received Rx buffer, and also
4150a714186SBjörn Töpel  * does DMA synchronization of the Rx buffer for CPU use.
4160a714186SBjörn Töpel  *
4170a714186SBjörn Töpel  * Returns the received Rx buffer
4180a714186SBjörn Töpel  **/
419*be1222b5SBjörn Töpel static struct i40e_rx_buffer_zc *i40e_get_rx_buffer_zc(
420*be1222b5SBjörn Töpel 	struct i40e_ring *rx_ring,
4210a714186SBjörn Töpel 	const unsigned int size)
4220a714186SBjörn Töpel {
423*be1222b5SBjörn Töpel 	struct i40e_rx_buffer_zc *bi;
4240a714186SBjörn Töpel 
425e1675f97SBjörn Töpel 	bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
4260a714186SBjörn Töpel 
4270a714186SBjörn Töpel 	/* we are reusing so sync this buffer for CPU use */
4280a714186SBjörn Töpel 	dma_sync_single_range_for_cpu(rx_ring->dev,
4290a714186SBjörn Töpel 				      bi->dma, 0,
4300a714186SBjörn Töpel 				      size,
4310a714186SBjörn Töpel 				      DMA_BIDIRECTIONAL);
4320a714186SBjörn Töpel 
4330a714186SBjörn Töpel 	return bi;
4340a714186SBjörn Töpel }
4350a714186SBjörn Töpel 
4360a714186SBjörn Töpel /**
4370a714186SBjörn Töpel  * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer
4380a714186SBjörn Töpel  * @rx_ring: Rx ring
4390a714186SBjörn Töpel  * @old_bi: The Rx buffer to recycle
4400a714186SBjörn Töpel  *
4410a714186SBjörn Töpel  * This function recycles a finished Rx buffer, and places it on the
4420a714186SBjörn Töpel  * recycle queue (next_to_alloc).
4430a714186SBjörn Töpel  **/
4440a714186SBjörn Töpel static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
445*be1222b5SBjörn Töpel 				    struct i40e_rx_buffer_zc *old_bi)
4460a714186SBjörn Töpel {
447*be1222b5SBjörn Töpel 	struct i40e_rx_buffer_zc *new_bi = i40e_rx_bi(rx_ring,
448e1675f97SBjörn Töpel 						      rx_ring->next_to_alloc);
4490a714186SBjörn Töpel 	u16 nta = rx_ring->next_to_alloc;
4500a714186SBjörn Töpel 
4510a714186SBjörn Töpel 	/* update, and store next to alloc */
4520a714186SBjörn Töpel 	nta++;
4530a714186SBjörn Töpel 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
4540a714186SBjörn Töpel 
4550a714186SBjörn Töpel 	/* transfer page from old buffer to new buffer */
45610912fc9SKevin Laatz 	new_bi->dma = old_bi->dma;
45710912fc9SKevin Laatz 	new_bi->addr = old_bi->addr;
45810912fc9SKevin Laatz 	new_bi->handle = old_bi->handle;
4590a714186SBjörn Töpel 
4600a714186SBjörn Töpel 	old_bi->addr = NULL;
4610a714186SBjörn Töpel }
4620a714186SBjörn Töpel 
4630a714186SBjörn Töpel /**
4640a714186SBjörn Töpel  * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
4650a714186SBjörn Töpel  * @alloc: Zero-copy allocator
4660a714186SBjörn Töpel  * @handle: Buffer handle
4670a714186SBjörn Töpel  **/
4680a714186SBjörn Töpel void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
4690a714186SBjörn Töpel {
470*be1222b5SBjörn Töpel 	struct i40e_rx_buffer_zc *bi;
4710a714186SBjörn Töpel 	struct i40e_ring *rx_ring;
4720a714186SBjörn Töpel 	u64 hr, mask;
4730a714186SBjörn Töpel 	u16 nta;
4740a714186SBjörn Töpel 
4750a714186SBjörn Töpel 	rx_ring = container_of(alloc, struct i40e_ring, zca);
4760a714186SBjörn Töpel 	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
47793ee30f3SMagnus Karlsson 	mask = rx_ring->xsk_umem->chunk_mask;
4780a714186SBjörn Töpel 
4790a714186SBjörn Töpel 	nta = rx_ring->next_to_alloc;
480e1675f97SBjörn Töpel 	bi = i40e_rx_bi(rx_ring, nta);
4810a714186SBjörn Töpel 
4820a714186SBjörn Töpel 	nta++;
4830a714186SBjörn Töpel 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
4840a714186SBjörn Töpel 
4850a714186SBjörn Töpel 	handle &= mask;
4860a714186SBjörn Töpel 
4870a714186SBjörn Töpel 	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
4880a714186SBjörn Töpel 	bi->dma += hr;
4890a714186SBjörn Töpel 
4900a714186SBjörn Töpel 	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
4910a714186SBjörn Töpel 	bi->addr += hr;
4920a714186SBjörn Töpel 
4934c5d9a7fSKevin Laatz 	bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
4944c5d9a7fSKevin Laatz 					    rx_ring->xsk_umem->headroom);
4950a714186SBjörn Töpel }
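
/* i40e_zca_free() is handed to the XDP core when the Rx ring registers
 * its memory model; a sketch of the hookup done in the driver's Rx ring
 * configuration path:
 *
 *	rx_ring->zca.free = i40e_zca_free;
 *	err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
 *					 MEM_TYPE_ZERO_COPY,
 *					 &rx_ring->zca);
 */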
4960a714186SBjörn Töpel 
4970a714186SBjörn Töpel /**
4980a714186SBjörn Töpel  * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
4990a714186SBjörn Töpel  * @rx_ring: Rx ring
5000a714186SBjörn Töpel  * @bi: Rx buffer
5010a714186SBjörn Töpel  * @xdp: xdp_buff
5020a714186SBjörn Töpel  *
5030a714186SBjörn Töpel  * This function allocates a new skb from a zero-copy Rx buffer.
5040a714186SBjörn Töpel  *
5050a714186SBjörn Töpel  * Returns the skb, or NULL on failure.
5060a714186SBjörn Töpel  **/
5070a714186SBjörn Töpel static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
508*be1222b5SBjörn Töpel 					     struct i40e_rx_buffer_zc *bi,
5090a714186SBjörn Töpel 					     struct xdp_buff *xdp)
5100a714186SBjörn Töpel {
5110a714186SBjörn Töpel 	unsigned int metasize = xdp->data - xdp->data_meta;
5120a714186SBjörn Töpel 	unsigned int datasize = xdp->data_end - xdp->data;
5130a714186SBjörn Töpel 	struct sk_buff *skb;
5140a714186SBjörn Töpel 
5150a714186SBjörn Töpel 	/* allocate a skb to store the frags */
5160a714186SBjörn Töpel 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
5170a714186SBjörn Töpel 			       xdp->data_end - xdp->data_hard_start,
5180a714186SBjörn Töpel 			       GFP_ATOMIC | __GFP_NOWARN);
5190a714186SBjörn Töpel 	if (unlikely(!skb))
5200a714186SBjörn Töpel 		return NULL;
5210a714186SBjörn Töpel 
5220a714186SBjörn Töpel 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5230a714186SBjörn Töpel 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5240a714186SBjörn Töpel 	if (metasize)
5250a714186SBjörn Töpel 		skb_metadata_set(skb, metasize);
5260a714186SBjörn Töpel 
5270a714186SBjörn Töpel 	i40e_reuse_rx_buffer_zc(rx_ring, bi);
5280a714186SBjörn Töpel 	return skb;
5290a714186SBjörn Töpel }
5300a714186SBjörn Töpel 
5310a714186SBjörn Töpel /**
5320a714186SBjörn Töpel  * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
5330a714186SBjörn Töpel  * @rx_ring: Rx ring
5340a714186SBjörn Töpel  * @budget: NAPI budget
5350a714186SBjörn Töpel  *
5360a714186SBjörn Töpel  * Returns amount of work completed
5370a714186SBjörn Töpel  **/
5380a714186SBjörn Töpel int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
5390a714186SBjörn Töpel {
5400a714186SBjörn Töpel 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
5410a714186SBjörn Töpel 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
5422a637c5bSJesper Dangaard Brouer 	struct xdp_umem *umem = rx_ring->xsk_umem;
5430a714186SBjörn Töpel 	unsigned int xdp_res, xdp_xmit = 0;
5440a714186SBjörn Töpel 	bool failure = false;
5450a714186SBjörn Töpel 	struct sk_buff *skb;
5460a714186SBjörn Töpel 	struct xdp_buff xdp;
5470a714186SBjörn Töpel 
5480a714186SBjörn Töpel 	xdp.rxq = &rx_ring->xdp_rxq;
5492a637c5bSJesper Dangaard Brouer 	xdp.frame_sz = xsk_umem_xdp_frame_sz(umem);
5500a714186SBjörn Töpel 
5510a714186SBjörn Töpel 	while (likely(total_rx_packets < (unsigned int)budget)) {
552*be1222b5SBjörn Töpel 		struct i40e_rx_buffer_zc *bi;
5530a714186SBjörn Töpel 		union i40e_rx_desc *rx_desc;
5540a714186SBjörn Töpel 		unsigned int size;
5550a714186SBjörn Töpel 		u64 qword;
5560a714186SBjörn Töpel 
5570a714186SBjörn Töpel 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
5580a714186SBjörn Töpel 			failure = failure ||
559411dc16fSBjörn Töpel 				  !i40e_alloc_rx_buffers_fast_zc(rx_ring,
5600a714186SBjörn Töpel 								 cleaned_count);
5610a714186SBjörn Töpel 			cleaned_count = 0;
5620a714186SBjörn Töpel 		}
5630a714186SBjörn Töpel 
5640a714186SBjörn Töpel 		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
5650a714186SBjörn Töpel 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
5660a714186SBjörn Töpel 
5670a714186SBjörn Töpel 		/* This memory barrier is needed to keep us from reading
5680a714186SBjörn Töpel 		 * any other fields out of the rx_desc until we have
5690a714186SBjörn Töpel 		 * verified the descriptor has been written back.
5700a714186SBjörn Töpel 		 */
5710a714186SBjörn Töpel 		dma_rmb();
5720a714186SBjörn Töpel 
573*be1222b5SBjörn Töpel 		if (i40e_rx_is_programming_status(qword)) {
574*be1222b5SBjörn Töpel 			i40e_clean_programming_status(rx_ring,
575*be1222b5SBjörn Töpel 						      rx_desc->raw.qword[0],
5760a714186SBjörn Töpel 						      qword);
577*be1222b5SBjörn Töpel 			bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
578*be1222b5SBjörn Töpel 			i40e_inc_ntc(rx_ring);
5790a714186SBjörn Töpel 			i40e_reuse_rx_buffer_zc(rx_ring, bi);
5800a714186SBjörn Töpel 			cleaned_count++;
5810a714186SBjörn Töpel 			continue;
5820a714186SBjörn Töpel 		}
5830a714186SBjörn Töpel 
584*be1222b5SBjörn Töpel 		bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
5850a714186SBjörn Töpel 		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
5860a714186SBjörn Töpel 		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
5870a714186SBjörn Töpel 		if (!size)
5880a714186SBjörn Töpel 			break;
5890a714186SBjörn Töpel 
5900a714186SBjörn Töpel 		bi = i40e_get_rx_buffer_zc(rx_ring, size);
5910a714186SBjörn Töpel 		xdp.data = bi->addr;
5920a714186SBjörn Töpel 		xdp.data_meta = xdp.data;
5930a714186SBjörn Töpel 		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
5940a714186SBjörn Töpel 		xdp.data_end = xdp.data + size;
5950a714186SBjörn Töpel 		xdp.handle = bi->handle;
5960a714186SBjörn Töpel 
5970a714186SBjörn Töpel 		xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
5980a714186SBjörn Töpel 		if (xdp_res) {
5990a714186SBjörn Töpel 			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
6000a714186SBjörn Töpel 				xdp_xmit |= xdp_res;
6010a714186SBjörn Töpel 				bi->addr = NULL;
6020a714186SBjörn Töpel 			} else {
6030a714186SBjörn Töpel 				i40e_reuse_rx_buffer_zc(rx_ring, bi);
6040a714186SBjörn Töpel 			}
6050a714186SBjörn Töpel 
6060a714186SBjörn Töpel 			total_rx_bytes += size;
6070a714186SBjörn Töpel 			total_rx_packets++;
6080a714186SBjörn Töpel 
6090a714186SBjörn Töpel 			cleaned_count++;
6100a714186SBjörn Töpel 			i40e_inc_ntc(rx_ring);
6110a714186SBjörn Töpel 			continue;
6120a714186SBjörn Töpel 		}
6130a714186SBjörn Töpel 
6140a714186SBjörn Töpel 		/* XDP_PASS path */
6150a714186SBjörn Töpel 
6160a714186SBjörn Töpel 		/* NB! We are not checking for errors using
6170a714186SBjörn Töpel 		 * i40e_test_staterr with
6180a714186SBjörn Töpel 		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
6190a714186SBjörn Töpel 		 * SBP is *not* set in PRT_SBPVSI (default not set).
6200a714186SBjörn Töpel 		 */
6210a714186SBjörn Töpel 		skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
6220a714186SBjörn Töpel 		if (!skb) {
6230a714186SBjörn Töpel 			rx_ring->rx_stats.alloc_buff_failed++;
6240a714186SBjörn Töpel 			break;
6250a714186SBjörn Töpel 		}
6260a714186SBjörn Töpel 
6270a714186SBjörn Töpel 		cleaned_count++;
6280a714186SBjörn Töpel 		i40e_inc_ntc(rx_ring);
6290a714186SBjörn Töpel 
6300a714186SBjörn Töpel 		if (eth_skb_pad(skb))
6310a714186SBjörn Töpel 			continue;
6320a714186SBjörn Töpel 
6330a714186SBjörn Töpel 		total_rx_bytes += skb->len;
6340a714186SBjörn Töpel 		total_rx_packets++;
6350a714186SBjörn Töpel 
636800b8f63SMichał Mirosław 		i40e_process_skb_fields(rx_ring, rx_desc, skb);
6372a508c64SMichał Mirosław 		napi_gro_receive(&rx_ring->q_vector->napi, skb);
6380a714186SBjörn Töpel 	}
6390a714186SBjörn Töpel 
6400a714186SBjörn Töpel 	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
6410a714186SBjörn Töpel 	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
6423d0c5f1cSMagnus Karlsson 
6433d0c5f1cSMagnus Karlsson 	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
6443d0c5f1cSMagnus Karlsson 		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
6453d0c5f1cSMagnus Karlsson 			xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
6463d0c5f1cSMagnus Karlsson 		else
6473d0c5f1cSMagnus Karlsson 			xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
6483d0c5f1cSMagnus Karlsson 
6493d0c5f1cSMagnus Karlsson 		return (int)total_rx_packets;
6503d0c5f1cSMagnus Karlsson 	}
6510a714186SBjörn Töpel 	return failure ? budget : (int)total_rx_packets;
6520a714186SBjörn Töpel }
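
/* The need_wakeup handling above pairs with user space: when the driver
 * sets the flag on the fill ring, the application must kick the kernel
 * before refilling. A hedged user-space sketch using libbpf's xsk.h:
 *
 *	if (xsk_ring_prod__needs_wakeup(&umem->fq))
 *		poll(&pfd, 1, timeout);	// pfd.fd is the AF_XDP socket fd
 */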
6530a714186SBjörn Töpel 
6541328dcddSMagnus Karlsson /**
6551328dcddSMagnus Karlsson  * i40e_xmit_zc - Performs AF_XDP zero-copy Tx
6561328dcddSMagnus Karlsson  * @xdp_ring: XDP Tx ring
6571328dcddSMagnus Karlsson  * @budget: NAPI budget
6581328dcddSMagnus Karlsson  *
6591328dcddSMagnus Karlsson  * Returns true if the work is finished.
6601328dcddSMagnus Karlsson  **/
6611328dcddSMagnus Karlsson static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
6621328dcddSMagnus Karlsson {
663cf484f9fSMagnus Karlsson 	struct i40e_tx_desc *tx_desc = NULL;
6641328dcddSMagnus Karlsson 	struct i40e_tx_buffer *tx_bi;
6651328dcddSMagnus Karlsson 	bool work_done = true;
6664bce4e5cSMaxim Mikityanskiy 	struct xdp_desc desc;
6671328dcddSMagnus Karlsson 	dma_addr_t dma;
6681328dcddSMagnus Karlsson 
6691328dcddSMagnus Karlsson 	while (budget-- > 0) {
6701328dcddSMagnus Karlsson 		if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
6711328dcddSMagnus Karlsson 			xdp_ring->tx_stats.tx_busy++;
6721328dcddSMagnus Karlsson 			work_done = false;
6731328dcddSMagnus Karlsson 			break;
6741328dcddSMagnus Karlsson 		}
6751328dcddSMagnus Karlsson 
6764bce4e5cSMaxim Mikityanskiy 		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
6771328dcddSMagnus Karlsson 			break;
6781328dcddSMagnus Karlsson 
6794bce4e5cSMaxim Mikityanskiy 		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
6804bce4e5cSMaxim Mikityanskiy 
6814bce4e5cSMaxim Mikityanskiy 		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
6821328dcddSMagnus Karlsson 					   DMA_BIDIRECTIONAL);
6831328dcddSMagnus Karlsson 
6841328dcddSMagnus Karlsson 		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
6854bce4e5cSMaxim Mikityanskiy 		tx_bi->bytecount = desc.len;
6861328dcddSMagnus Karlsson 
6871328dcddSMagnus Karlsson 		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
6881328dcddSMagnus Karlsson 		tx_desc->buffer_addr = cpu_to_le64(dma);
6891328dcddSMagnus Karlsson 		tx_desc->cmd_type_offset_bsz =
6901328dcddSMagnus Karlsson 			build_ctob(I40E_TX_DESC_CMD_ICRC
6911328dcddSMagnus Karlsson 				   | I40E_TX_DESC_CMD_EOP,
6924bce4e5cSMaxim Mikityanskiy 				   0, desc.len, 0);
6931328dcddSMagnus Karlsson 
6941328dcddSMagnus Karlsson 		xdp_ring->next_to_use++;
6951328dcddSMagnus Karlsson 		if (xdp_ring->next_to_use == xdp_ring->count)
6961328dcddSMagnus Karlsson 			xdp_ring->next_to_use = 0;
6971328dcddSMagnus Karlsson 	}
6981328dcddSMagnus Karlsson 
699cf484f9fSMagnus Karlsson 	if (tx_desc) {
7001328dcddSMagnus Karlsson 		/* Request an interrupt for the last frame and bump tail ptr. */
7011328dcddSMagnus Karlsson 		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
7021328dcddSMagnus Karlsson 						 I40E_TXD_QW1_CMD_SHIFT);
7031328dcddSMagnus Karlsson 		i40e_xdp_ring_update_tail(xdp_ring);
7041328dcddSMagnus Karlsson 
7051328dcddSMagnus Karlsson 		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
7061328dcddSMagnus Karlsson 	}
7071328dcddSMagnus Karlsson 
7081328dcddSMagnus Karlsson 	return !!budget && work_done;
7091328dcddSMagnus Karlsson }
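
/* Design note: only the final descriptor of the batch carries the RS
 * bit, so the hardware reports completion (and raises any interrupt)
 * once per burst instead of once per frame.
 */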
7101328dcddSMagnus Karlsson 
7111328dcddSMagnus Karlsson /**
7121328dcddSMagnus Karlsson  * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
7131328dcddSMagnus Karlsson  * @tx_ring: XDP Tx ring
7141328dcddSMagnus Karlsson  * @tx_bi: Tx buffer info to clean
7151328dcddSMagnus Karlsson  **/
7161328dcddSMagnus Karlsson static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
7171328dcddSMagnus Karlsson 				     struct i40e_tx_buffer *tx_bi)
7181328dcddSMagnus Karlsson {
7191328dcddSMagnus Karlsson 	xdp_return_frame(tx_bi->xdpf);
7201328dcddSMagnus Karlsson 	dma_unmap_single(tx_ring->dev,
7211328dcddSMagnus Karlsson 			 dma_unmap_addr(tx_bi, dma),
7221328dcddSMagnus Karlsson 			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
7231328dcddSMagnus Karlsson 	dma_unmap_len_set(tx_bi, len, 0);
7241328dcddSMagnus Karlsson }
7251328dcddSMagnus Karlsson 
7261328dcddSMagnus Karlsson /**
7271328dcddSMagnus Karlsson  * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
7281328dcddSMagnus Karlsson  * @vsi: Current VSI
7281328dcddSMagnus Karlsson  * @tx_ring: XDP Tx ring
7291328dcddSMagnus Karlsson  * @napi_budget: NAPI budget
7301328dcddSMagnus Karlsson  *
7311328dcddSMagnus Karlsson  * Returns true if cleanup/transmission is done.
7321328dcddSMagnus Karlsson  **/
7331328dcddSMagnus Karlsson bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
7341328dcddSMagnus Karlsson 			   struct i40e_ring *tx_ring, int napi_budget)
7351328dcddSMagnus Karlsson {
7361328dcddSMagnus Karlsson 	unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
7371328dcddSMagnus Karlsson 	u32 i, completed_frames, frames_ready, xsk_frames = 0;
7381328dcddSMagnus Karlsson 	struct xdp_umem *umem = tx_ring->xsk_umem;
7391328dcddSMagnus Karlsson 	u32 head_idx = i40e_get_head(tx_ring);
7401328dcddSMagnus Karlsson 	bool work_done = true, xmit_done;
7411328dcddSMagnus Karlsson 	struct i40e_tx_buffer *tx_bi;
7421328dcddSMagnus Karlsson 
7431328dcddSMagnus Karlsson 	if (head_idx < tx_ring->next_to_clean)
7441328dcddSMagnus Karlsson 		head_idx += tx_ring->count;
7451328dcddSMagnus Karlsson 	frames_ready = head_idx - tx_ring->next_to_clean;
7461328dcddSMagnus Karlsson 
7471328dcddSMagnus Karlsson 	if (frames_ready == 0) {
7481328dcddSMagnus Karlsson 		goto out_xmit;
7491328dcddSMagnus Karlsson 	} else if (frames_ready > budget) {
7501328dcddSMagnus Karlsson 		completed_frames = budget;
7511328dcddSMagnus Karlsson 		work_done = false;
7521328dcddSMagnus Karlsson 	} else {
7531328dcddSMagnus Karlsson 		completed_frames = frames_ready;
7541328dcddSMagnus Karlsson 	}
7551328dcddSMagnus Karlsson 
7561328dcddSMagnus Karlsson 	ntc = tx_ring->next_to_clean;
7571328dcddSMagnus Karlsson 
7581328dcddSMagnus Karlsson 	for (i = 0; i < completed_frames; i++) {
7591328dcddSMagnus Karlsson 		tx_bi = &tx_ring->tx_bi[ntc];
7601328dcddSMagnus Karlsson 
7611328dcddSMagnus Karlsson 		if (tx_bi->xdpf)
7621328dcddSMagnus Karlsson 			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
7631328dcddSMagnus Karlsson 		else
7641328dcddSMagnus Karlsson 			xsk_frames++;
7651328dcddSMagnus Karlsson 
7661328dcddSMagnus Karlsson 		tx_bi->xdpf = NULL;
7671328dcddSMagnus Karlsson 		total_bytes += tx_bi->bytecount;
7681328dcddSMagnus Karlsson 
7691328dcddSMagnus Karlsson 		if (++ntc >= tx_ring->count)
7701328dcddSMagnus Karlsson 			ntc = 0;
7711328dcddSMagnus Karlsson 	}
7721328dcddSMagnus Karlsson 
7731328dcddSMagnus Karlsson 	tx_ring->next_to_clean += completed_frames;
7741328dcddSMagnus Karlsson 	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
7751328dcddSMagnus Karlsson 		tx_ring->next_to_clean -= tx_ring->count;
7761328dcddSMagnus Karlsson 
7771328dcddSMagnus Karlsson 	if (xsk_frames)
7781328dcddSMagnus Karlsson 		xsk_umem_complete_tx(umem, xsk_frames);
7791328dcddSMagnus Karlsson 
7801328dcddSMagnus Karlsson 	i40e_arm_wb(tx_ring, vsi, budget);
7811328dcddSMagnus Karlsson 	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
7821328dcddSMagnus Karlsson 
7831328dcddSMagnus Karlsson out_xmit:
78470563957SMagnus Karlsson 	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
7853d0c5f1cSMagnus Karlsson 		xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
7863d0c5f1cSMagnus Karlsson 
7871328dcddSMagnus Karlsson 	xmit_done = i40e_xmit_zc(tx_ring, budget);
7881328dcddSMagnus Karlsson 
7891328dcddSMagnus Karlsson 	return work_done && xmit_done;
7901328dcddSMagnus Karlsson }
7911328dcddSMagnus Karlsson 
7921328dcddSMagnus Karlsson /**
7939116e5e2SMagnus Karlsson  * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup netdev op
7941328dcddSMagnus Karlsson  * @dev: the netdevice
7951328dcddSMagnus Karlsson  * @queue_id: queue id to wake up
7969116e5e2SMagnus Karlsson  * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
7971328dcddSMagnus Karlsson  *
7981328dcddSMagnus Karlsson  * Returns <0 for errors, 0 otherwise.
7991328dcddSMagnus Karlsson  **/
8009116e5e2SMagnus Karlsson int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
8011328dcddSMagnus Karlsson {
8021328dcddSMagnus Karlsson 	struct i40e_netdev_priv *np = netdev_priv(dev);
8031328dcddSMagnus Karlsson 	struct i40e_vsi *vsi = np->vsi;
804b3873a5bSMaxim Mikityanskiy 	struct i40e_pf *pf = vsi->back;
8051328dcddSMagnus Karlsson 	struct i40e_ring *ring;
8061328dcddSMagnus Karlsson 
807b3873a5bSMaxim Mikityanskiy 	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
808c77e9f09SMaciej Fijalkowski 		return -EAGAIN;
809b3873a5bSMaxim Mikityanskiy 
8101328dcddSMagnus Karlsson 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
8111328dcddSMagnus Karlsson 		return -ENETDOWN;
8121328dcddSMagnus Karlsson 
8131328dcddSMagnus Karlsson 	if (!i40e_enabled_xdp_vsi(vsi))
8141328dcddSMagnus Karlsson 		return -ENXIO;
8151328dcddSMagnus Karlsson 
8161328dcddSMagnus Karlsson 	if (queue_id >= vsi->num_queue_pairs)
8171328dcddSMagnus Karlsson 		return -ENXIO;
8181328dcddSMagnus Karlsson 
8191328dcddSMagnus Karlsson 	if (!vsi->xdp_rings[queue_id]->xsk_umem)
8201328dcddSMagnus Karlsson 		return -ENXIO;
8211328dcddSMagnus Karlsson 
8221328dcddSMagnus Karlsson 	ring = vsi->xdp_rings[queue_id];
8231328dcddSMagnus Karlsson 
8241328dcddSMagnus Karlsson 	/* The idea here is that if NAPI is running, mark a miss, so
8251328dcddSMagnus Karlsson 	 * it will run again. If not, trigger an interrupt and
8261328dcddSMagnus Karlsson 	 * schedule the NAPI from interrupt context. If NAPI would be
8271328dcddSMagnus Karlsson 	 * scheduled here, the interrupt affinity would not be
8281328dcddSMagnus Karlsson 	 * honored.
8291328dcddSMagnus Karlsson 	 */
8301328dcddSMagnus Karlsson 	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
8311328dcddSMagnus Karlsson 		i40e_force_wb(vsi, ring->q_vector);
8321328dcddSMagnus Karlsson 
8331328dcddSMagnus Karlsson 	return 0;
8341328dcddSMagnus Karlsson }
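
/* User space reaches this hook via poll()/sendto() on the AF_XDP
 * socket. An illustrative Tx kick using libbpf's xsk.h helpers:
 *
 *	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
 *		sendto(xsk_socket__fd(xsk->xsk), NULL, 0,
 *		       MSG_DONTWAIT, NULL, 0);
 */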
8359dbb1370SBjörn Töpel 
836411dc16fSBjörn Töpel void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
837411dc16fSBjörn Töpel {
838411dc16fSBjörn Töpel 	u16 i;
839411dc16fSBjörn Töpel 
840411dc16fSBjörn Töpel 	for (i = 0; i < rx_ring->count; i++) {
841*be1222b5SBjörn Töpel 		struct i40e_rx_buffer_zc *rx_bi = i40e_rx_bi(rx_ring, i);
842411dc16fSBjörn Töpel 
843411dc16fSBjörn Töpel 		if (!rx_bi->addr)
844411dc16fSBjörn Töpel 			continue;
845411dc16fSBjörn Töpel 
846411dc16fSBjörn Töpel 		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
847411dc16fSBjörn Töpel 		rx_bi->addr = NULL;
848411dc16fSBjörn Töpel 	}
849411dc16fSBjörn Töpel }
850411dc16fSBjörn Töpel 
8519dbb1370SBjörn Töpel /**
8529dbb1370SBjörn Töpel  * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
8539dbb1370SBjörn Töpel  * @tx_ring: XDP Tx ring
8549dbb1370SBjörn Töpel  **/
8559dbb1370SBjörn Töpel void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
8569dbb1370SBjörn Töpel {
8579dbb1370SBjörn Töpel 	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
8589dbb1370SBjörn Töpel 	struct xdp_umem *umem = tx_ring->xsk_umem;
8599dbb1370SBjörn Töpel 	struct i40e_tx_buffer *tx_bi;
8609dbb1370SBjörn Töpel 	u32 xsk_frames = 0;
8619dbb1370SBjörn Töpel 
8629dbb1370SBjörn Töpel 	while (ntc != ntu) {
8639dbb1370SBjörn Töpel 		tx_bi = &tx_ring->tx_bi[ntc];
8649dbb1370SBjörn Töpel 
8659dbb1370SBjörn Töpel 		if (tx_bi->xdpf)
8669dbb1370SBjörn Töpel 			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
8679dbb1370SBjörn Töpel 		else
8689dbb1370SBjörn Töpel 			xsk_frames++;
8699dbb1370SBjörn Töpel 
8709dbb1370SBjörn Töpel 		tx_bi->xdpf = NULL;
8719dbb1370SBjörn Töpel 
8729dbb1370SBjörn Töpel 		ntc++;
8739dbb1370SBjörn Töpel 		if (ntc >= tx_ring->count)
8749dbb1370SBjörn Töpel 			ntc = 0;
8759dbb1370SBjörn Töpel 	}
8769dbb1370SBjörn Töpel 
8779dbb1370SBjörn Töpel 	if (xsk_frames)
8789dbb1370SBjörn Töpel 		xsk_umem_complete_tx(umem, xsk_frames);
8799dbb1370SBjörn Töpel }
8803ab52af5SBjörn Töpel 
8813ab52af5SBjörn Töpel /**
8823ab52af5SBjörn Töpel  * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
8833ab52af5SBjörn Töpel  * @vsi: Current VSI
8843ab52af5SBjörn Töpel  *
8853ab52af5SBjörn Töpel  * Returns true if any of the Rx rings has an AF_XDP UMEM attached
8863ab52af5SBjörn Töpel  **/
8873ab52af5SBjörn Töpel bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
8883ab52af5SBjörn Töpel {
889f3fef2b6SJan Sokolowski 	struct net_device *netdev = vsi->netdev;
8903ab52af5SBjörn Töpel 	int i;
8913ab52af5SBjörn Töpel 
8923ab52af5SBjörn Töpel 	for (i = 0; i < vsi->num_queue_pairs; i++) {
893f3fef2b6SJan Sokolowski 		if (xdp_get_umem_from_qid(netdev, i))
8943ab52af5SBjörn Töpel 			return true;
8953ab52af5SBjörn Töpel 	}
8963ab52af5SBjörn Töpel 
8973ab52af5SBjörn Töpel 	return false;
8983ab52af5SBjörn Töpel }
899