xref: /linux/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c (revision c771600c6af14749609b49565ffb4cac2959710d)
162d03330SJakub Kicinski // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
262d03330SJakub Kicinski /* Copyright (C) 2018 Netronome Systems, Inc */
362d03330SJakub Kicinski /* Copyright (C) 2021 Corigine, Inc */
462d03330SJakub Kicinski 
562d03330SJakub Kicinski #include <linux/bpf_trace.h>
662d03330SJakub Kicinski #include <linux/netdevice.h>
762d03330SJakub Kicinski 
862d03330SJakub Kicinski #include "../nfp_app.h"
962d03330SJakub Kicinski #include "../nfp_net.h"
1062d03330SJakub Kicinski #include "../nfp_net_dp.h"
1162d03330SJakub Kicinski #include "../nfp_net_xsk.h"
1262d03330SJakub Kicinski #include "nfd3.h"
1362d03330SJakub Kicinski 
/* Transmit an AF_XDP RX buffer back out on the XDP TX ring (XDP_TX verdict).
 *
 * @dp:		data path (unused here beyond the signature shared with callers)
 * @r_vec:	ring vector owning the XSK pool
 * @rx_ring:	RX ring the buffer came from
 * @tx_ring:	XDP TX ring to queue the frame on
 * @xrxbuf:	XSK RX buffer holding the frame
 * @pkt_len:	frame length in bytes
 * @pkt_off:	offset of the frame within the buffer's DMA mapping
 *
 * Return: true if a descriptor was queued, false if the TX ring is full
 * (the caller is expected to drop the frame in that case).
 */
static bool
nfp_nfd3_xsk_tx_xdp(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		    struct nfp_net_rx_ring *rx_ring,
		    struct nfp_net_tx_ring *tx_ring,
		    struct nfp_net_xsk_rx_buf *xrxbuf, unsigned int pkt_len,
		    int pkt_off)
{
	struct xsk_buff_pool *pool = r_vec->xsk_pool;
	struct nfp_nfd3_tx_buf *txbuf;
	struct nfp_nfd3_tx_desc *txd;
	unsigned int wr_idx;

	/* Need at least one free descriptor slot. */
	if (nfp_net_tx_space(tx_ring) < 1)
		return false;

	/* Make the payload visible to the device before publishing
	 * the descriptor.
	 */
	xsk_buff_raw_dma_sync_for_device(pool, xrxbuf->dma_addr + pkt_off,
					 pkt_len);

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the xdp buff so completion (is_xsk_tx) can free it back
	 * to the pool.
	 */
	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->xdp = xrxbuf->xdp;
	txbuf->real_len = pkt_len;
	txbuf->is_xsk_tx = true;

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = NFD3_DESC_TX_EOP;	/* single-descriptor frame */
	txd->dma_len = cpu_to_le16(pkt_len);
	nfp_desc_set_dma_addr_40b(txd, xrxbuf->dma_addr + pkt_off);
	txd->data_len = cpu_to_le16(pkt_len);

	/* No offload flags / LSO for recycled XDP_TX frames. */
	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	/* wr_ptr_add is flushed to the doorbell by the caller. */
	tx_ring->wr_ptr_add++;
	tx_ring->wr_p++;

	return true;
}
5562d03330SJakub Kicinski 
/* Copy an AF_XDP RX buffer into a freshly allocated skb and hand it up
 * the network stack, returning the XSK buffer to the pool.
 *
 * @rx_ring:	RX ring the frame arrived on
 * @rxd:	HW RX descriptor (consulted for checksum/VLAN state)
 * @xrxbuf:	XSK buffer holding the packet data
 * @meta:	parsed prepend metadata (port id, hash, mark, ...)
 * @pkt_len:	packet length in bytes
 * @meta_xdp:	if true, record the XDP metadata length on the skb
 * @skbs_polled: incremented for every skb handed to the stack
 *
 * On any failure (unknown port, allocation failure, VLAN strip error)
 * the buffer is dropped via nfp_net_xsk_rx_drop() and no skb is passed up.
 */
static void nfp_nfd3_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
				const struct nfp_net_rx_desc *rxd,
				struct nfp_net_xsk_rx_buf *xrxbuf,
				const struct nfp_meta_parsed *meta,
				unsigned int pkt_len,
				bool meta_xdp,
				unsigned int *skbs_polled)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct net_device *netdev;
	struct sk_buff *skb;

	/* Non-zero port id means the frame belongs to another netdev
	 * (looked up through the app); its RX stats are bumped here.
	 */
	if (likely(!meta->portid)) {
		netdev = dp->netdev;
	} else {
		struct nfp_net *nn = netdev_priv(dp->netdev);

		netdev = nfp_app_dev_get(nn->app, meta->portid, NULL);
		if (unlikely(!netdev)) {
			nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			return;
		}
		nfp_repr_inc_rx_stats(netdev, pkt_len);
	}

	skb = napi_alloc_skb(&r_vec->napi, pkt_len);
	if (!skb) {
		nfp_net_xsk_rx_drop(r_vec, xrxbuf);
		return;
	}
	/* AF_XDP buffers belong to the umem - always copy out. */
	skb_put_data(skb, xrxbuf->xdp->data, pkt_len);

	skb->mark = meta->mark;
	skb_set_hash(skb, meta->hash, meta->hash_type);

	skb_record_rx_queue(skb, rx_ring->idx);
	skb->protocol = eth_type_trans(skb, netdev);

	nfp_nfd3_rx_csum(dp, r_vec, rxd, meta, skb);

	if (unlikely(!nfp_net_vlan_strip(skb, rxd, meta))) {
		dev_kfree_skb_any(skb);
		nfp_net_xsk_rx_drop(r_vec, xrxbuf);
		return;
	}

	/* Propagate the XDP metadata length set by the program, if any. */
	if (meta_xdp)
		skb_metadata_set(skb,
				 xrxbuf->xdp->data - xrxbuf->xdp->data_meta);

	napi_gro_receive(&rx_ring->r_vec->napi, skb);

	/* Data was copied - the XSK buffer can go back to the pool now. */
	nfp_net_xsk_rx_free(xrxbuf);

	(*skbs_polled)++;
}
11362d03330SJakub Kicinski 
/* Process up to @budget frames from an AF_XDP-enabled RX ring.
 *
 * Runs the XDP program on each frame and acts on the verdict:
 * XDP_PASS frames are copied into skbs (counted via @skbs_polled),
 * XDP_TX frames are recycled onto the XDP TX ring, XDP_REDIRECT goes
 * through xdp_do_redirect(), anything else is dropped.  Frames carrying
 * a non-zero port id in the prepend metadata bypass XDP entirely.
 *
 * Return: number of RX descriptors consumed (<= @budget).
 */
static unsigned int
nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
		unsigned int *skbs_polled)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_tx_ring *tx_ring;
	struct bpf_prog *xdp_prog;
	bool xdp_redir = false;
	int pkts_polled = 0;

	xdp_prog = READ_ONCE(dp->xdp_prog);
	tx_ring = r_vec->xdp_ring;

	while (pkts_polled < budget) {
		unsigned int meta_len, data_len, pkt_len, pkt_off;
		struct nfp_net_xsk_rx_buf *xrxbuf;
		struct nfp_net_rx_desc *rxd;
		struct nfp_meta_parsed meta;
		int idx, act;

		idx = D_IDX(rx_ring, rx_ring->rd_p);

		/* DD bit set means the device has written this descriptor
		 * back; otherwise there is nothing more to process.
		 */
		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
			break;

		rx_ring->rd_p++;
		pkts_polled++;

		xrxbuf = &rx_ring->xsk_rxbufs[idx];

		/* If starved of buffers "drop" it and scream. */
		if (rx_ring->rd_p >= rx_ring->wr_p) {
			nn_dp_warn(dp, "Starved of RX buffers\n");
			nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			break;
		}

		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		memset(&meta, 0, sizeof(meta));

		/* Only supporting AF_XDP with dynamic metadata so buffer layout
		 * is always:
		 *
		 *  ---------------------------------------------------------
		 * |  off | metadata  |             packet           | XXXX  |
		 *  ---------------------------------------------------------
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);
		pkt_len = data_len - meta_len;

		if (unlikely(meta_len > NFP_NET_MAX_PREPEND)) {
			nn_dp_warn(dp, "Oversized RX packet metadata %u\n",
				   meta_len);
			nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			continue;
		}

		/* Stats update. */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += pkt_len;
		u64_stats_update_end(&r_vec->rx_sync);

		/* Advance past the metadata prepend so xdp->data points at
		 * the packet itself before the program runs.
		 */
		xrxbuf->xdp->data += meta_len;
		xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
		xdp_set_data_meta_invalid(xrxbuf->xdp);
		xsk_buff_dma_sync_for_cpu(xrxbuf->xdp);
		net_prefetch(xrxbuf->xdp->data);

		if (meta_len) {
			if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
							 xrxbuf->xdp->data -
							 meta_len,
							 xrxbuf->xdp->data,
							 pkt_len, meta_len))) {
				nn_dp_warn(dp, "Invalid RX packet metadata\n");
				nfp_net_xsk_rx_drop(r_vec, xrxbuf);
				continue;
			}

			if (unlikely(meta.portid)) {
				struct nfp_net *nn = netdev_priv(dp->netdev);

				/* Frames with a non-ctrl port id skip XDP and
				 * go straight to the skb path.
				 */
				if (meta.portid != NFP_META_PORT_ID_CTRL) {
					nfp_nfd3_xsk_rx_skb(rx_ring, rxd,
							    xrxbuf, &meta,
							    pkt_len, false,
							    skbs_polled);
					continue;
				}

				/* Control message - hand raw data to the app. */
				nfp_app_ctrl_rx_raw(nn->app, xrxbuf->xdp->data,
						    pkt_len);
				nfp_net_xsk_rx_free(xrxbuf);
				continue;
			}
		}

		act = bpf_prog_run_xdp(xdp_prog, xrxbuf->xdp);

		/* The program may have moved data/data_end - recompute. */
		pkt_len = xrxbuf->xdp->data_end - xrxbuf->xdp->data;
		pkt_off = xrxbuf->xdp->data - xrxbuf->xdp->data_hard_start;

		switch (act) {
		case XDP_PASS:
			nfp_nfd3_xsk_rx_skb(rx_ring, rxd, xrxbuf, &meta, pkt_len,
					    true, skbs_polled);
			break;
		case XDP_TX:
			/* On success the buffer is unstashed - ownership moves
			 * to the TX completion path.
			 */
			if (!nfp_nfd3_xsk_tx_xdp(dp, r_vec, rx_ring, tx_ring,
						 xrxbuf, pkt_len, pkt_off))
				nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			else
				nfp_net_xsk_rx_unstash(xrxbuf);
			break;
		case XDP_REDIRECT:
			if (xdp_do_redirect(dp->netdev, xrxbuf->xdp, xdp_prog)) {
				nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			} else {
				nfp_net_xsk_rx_unstash(xrxbuf);
				xdp_redir = true;	/* flush after the loop */
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dp->netdev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			break;
		}
	}

	/* Refill the freelist and flush any deferred work once per poll. */
	nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);

	if (xdp_redir)
		xdp_do_flush();

	if (tx_ring->wr_ptr_add)
		nfp_net_tx_xmit_more_flush(tx_ring);

	return pkts_polled;
}
26662d03330SJakub Kicinski 
26762d03330SJakub Kicinski void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf)
26862d03330SJakub Kicinski {
26962d03330SJakub Kicinski 	xsk_buff_free(txbuf->xdp);
27062d03330SJakub Kicinski 
27162d03330SJakub Kicinski 	txbuf->dma_addr = 0;
27262d03330SJakub Kicinski 	txbuf->xdp = NULL;
27362d03330SJakub Kicinski }
27462d03330SJakub Kicinski 
/* Reclaim completed descriptors on the XDP/XSK TX ring.
 *
 * Advances the software read pointer up to the queue controller's read
 * pointer (capped at NFP_NET_XDP_MAX_COMPLETE per call).  Buffers marked
 * is_xsk_tx were recycled XDP_TX frames and are freed back to the pool;
 * the remainder came from the AF_XDP TX queue and are completed to
 * userspace via xsk_tx_completed().
 *
 * Return: true if all outstanding completions were processed, false if
 * the per-call cap was hit and more work remains.
 */
static bool nfp_nfd3_xsk_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	u32 done_pkts = 0, done_bytes = 0, reused = 0;
	bool done_all;
	int idx, todo;
	u32 qcp_rd_p;

	/* Ring empty - nothing in flight. */
	if (tx_ring->wr_p == tx_ring->rd_p)
		return true;

	/* Work out how many descriptors have been transmitted. */
	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return true;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	/* Cap the work per call; report false so the caller keeps polling. */
	done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
	todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);

	tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);

	done_pkts = todo;
	while (todo--) {
		struct nfp_nfd3_tx_buf *txbuf;

		idx = D_IDX(tx_ring, tx_ring->rd_p);
		tx_ring->rd_p++;

		/* Slots with no recorded length contribute nothing. */
		txbuf = &tx_ring->txbufs[idx];
		if (unlikely(!txbuf->real_len))
			continue;

		done_bytes += txbuf->real_len;
		txbuf->real_len = 0;

		/* Recycled XDP_TX buffers go back to the pool and must not
		 * be reported to the AF_XDP TX completion queue.
		 */
		if (txbuf->is_xsk_tx) {
			nfp_nfd3_xsk_tx_free(txbuf);
			reused++;
		}
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	/* Only frames that came from the AF_XDP TX queue are completed. */
	xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused);

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);

	return done_all;
}
33262d03330SJakub Kicinski 
/* Transmit frames queued by userspace on the AF_XDP TX queue.
 *
 * Peeks descriptors from the XSK pool in batches of NFP_NET_XSK_TX_BATCH
 * (only while a full batch worth of ring space is available), DMA-syncs
 * the payloads, fills HW TX descriptors, then releases the pool
 * descriptors and rings the doorbell once for everything queued.
 */
static void nfp_nfd3_xsk_tx(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct xdp_desc desc[NFP_NET_XSK_TX_BATCH];
	struct xsk_buff_pool *xsk_pool;
	struct nfp_nfd3_tx_desc *txd;
	u32 pkts = 0, wr_idx;
	u32 i, got;

	xsk_pool = r_vec->xsk_pool;

	while (nfp_net_tx_space(tx_ring) >= NFP_NET_XSK_TX_BATCH) {
		/* Pull up to a batch of descriptors from the pool. */
		for (i = 0; i < NFP_NET_XSK_TX_BATCH; i++)
			if (!xsk_tx_peek_desc(xsk_pool, &desc[i]))
				break;
		got = i;
		if (!got)
			break;

		/* i == got here - prefetch the slot past this batch for
		 * the upcoming writes.
		 */
		wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
		prefetchw(&tx_ring->txds[wr_idx]);

		/* Make all payloads device-visible before descriptor fill. */
		for (i = 0; i < got; i++)
			xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr,
							 desc[i].len);

		for (i = 0; i < got; i++) {
			wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);

			/* is_xsk_tx = false: completion reports these to the
			 * AF_XDP completion queue rather than freeing them.
			 */
			tx_ring->txbufs[wr_idx].real_len = desc[i].len;
			tx_ring->txbufs[wr_idx].is_xsk_tx = false;

			/* Build TX descriptor. */
			txd = &tx_ring->txds[wr_idx];
			nfp_desc_set_dma_addr_40b(txd,
						  xsk_buff_raw_get_dma(xsk_pool, desc[i].addr));
			txd->offset_eop = NFD3_DESC_TX_EOP;
			txd->dma_len = cpu_to_le16(desc[i].len);
			txd->data_len = cpu_to_le16(desc[i].len);
		}

		tx_ring->wr_p += got;
		pkts += got;
	}

	if (!pkts)
		return;

	xsk_tx_release(xsk_pool);
	/* Ensure all records are visible before incrementing write counter. */
	wmb();
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, pkts);
}
38662d03330SJakub Kicinski 
/* NAPI poll callback for AF_XDP enabled ring vectors.
 *
 * Services RX first; if budget remains it completes the regular TX ring,
 * reclaims XSK TX completions and kicks AF_XDP transmission.
 *
 * Return: @budget (keep polling) when RX consumed the whole budget or
 * XSK completions could not all be processed, otherwise the number of
 * packets processed, with the device interrupt re-armed.
 */
int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget)
{
	struct nfp_net_r_vector *r_vec =
		container_of(napi, struct nfp_net_r_vector, napi);
	unsigned int pkts_polled, skbs = 0;

	pkts_polled = nfp_nfd3_xsk_rx(r_vec->rx_ring, budget, &skbs);

	if (pkts_polled < budget) {
		if (r_vec->tx_ring)
			nfp_nfd3_tx_complete(r_vec->tx_ring, budget);

		/* More completions pending - claim the full budget so NAPI
		 * schedules us again.
		 */
		if (!nfp_nfd3_xsk_complete(r_vec->xdp_ring))
			pkts_polled = budget;

		nfp_nfd3_xsk_tx(r_vec->xdp_ring);

		/* Only stack-facing skbs are reported to NAPI as done work. */
		if (pkts_polled < budget && napi_complete_done(napi, skbs))
			nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
	}

	return pkts_polled;
}
410