/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2025 Intel Corporation */

#ifndef _IDPF_XDP_H_
#define _IDPF_XDP_H_

#include <net/libeth/xdp.h>

#include "idpf_txrx.h"

int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
			       struct bpf_prog *xdp_prog);

int idpf_xdpsqs_get(const struct idpf_vport *vport);
void idpf_xdpsqs_put(const struct idpf_vport *vport);

u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget);
bool idpf_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags);

/**
 * idpf_xdp_tx_xmit - produce a single HW Tx descriptor out of XDP desc
 * @desc: XDP descriptor to pull the DMA address and length from
 * @i: descriptor index on the queue to fill
 * @sq: XDP queue to produce the HW Tx descriptor on
 * @priv: &xsk_tx_metadata_ops on XSk xmit or %NULL
 */
static inline void idpf_xdp_tx_xmit(struct libeth_xdp_tx_desc desc, u32 i,
				    const struct libeth_xdpsq *sq, u64 priv)
{
	struct idpf_flex_tx_desc *tx_desc = sq->descs;
	u32 cmd;

	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_DTYPE_M,
			 IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2);
	if (desc.flags & LIBETH_XDP_TX_LAST)
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_DESC_CMD_EOP);
	if (priv && (desc.flags & LIBETH_XDP_TX_CSUM))
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_FLEX_DESC_CMD_CS_EN);

	tx_desc = &tx_desc[i];
	tx_desc->buf_addr = cpu_to_le64(desc.addr);
#ifdef __LIBETH_WORD_ACCESS
	*(u64 *)&tx_desc->qw1 = ((u64)desc.len << 48) | cmd;
#else
	tx_desc->qw1.buf_size = cpu_to_le16(desc.len);
	tx_desc->qw1.cmd_dtype = cpu_to_le16(cmd);
#endif
}

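/*
 * Worked example (illustrative only, the length is made up): for a
 * single-buffer 1514-byte frame, the command value built above carries the
 * %IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 DTYPE plus the %IDPF_TX_DESC_CMD_EOP
 * command bit. With __LIBETH_WORD_ACCESS, QW1 is then written as a single
 * 64-bit store, (1514ULL << 48) | cmd, i.e. the buffer size lands in bits
 * 63:48; without it, the length and the command/DTYPE word are written as two
 * separate 16-bit LE stores instead.
 */
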
/**
 * idpf_xdpsq_set_rs - request HW writeback for the last filled descriptor
 * @xdpsq: XDP Tx queue to set the RS bit on
 *
 * Set the RS ("report status") command bit on the most recently filled
 * descriptor, so that the HW reports the whole batch as sent once it is done
 * with it.
 */
static inline void idpf_xdpsq_set_rs(const struct idpf_tx_queue *xdpsq)
{
	u32 ntu, cmd;

	ntu = xdpsq->next_to_use;
	if (unlikely(!ntu))
		ntu = xdpsq->desc_count;

	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M, IDPF_TX_DESC_CMD_RS);
#ifdef __LIBETH_WORD_ACCESS
	*(u64 *)&xdpsq->flex_tx[ntu - 1].q.qw1 |= cmd;
#else
	xdpsq->flex_tx[ntu - 1].q.qw1.cmd_dtype |= cpu_to_le16(cmd);
#endif
}

/**
 * idpf_xdpsq_update_tail - bump the XDPSQ tail (doorbell) register
 * @xdpsq: XDP Tx queue to notify the HW about
 *
 * Make sure the descriptors are fully written to memory before ringing the
 * doorbell (hence the dma_wmb()), then pass the new producer index to the HW.
 */
static inline void idpf_xdpsq_update_tail(const struct idpf_tx_queue *xdpsq)
{
	dma_wmb();
	writel_relaxed(xdpsq->next_to_use, xdpsq->tail);
}

/**
 * idpf_xdp_tx_finalize - finalize sending over XDPSQ
 * @_xdpsq: XDP Tx queue
 * @sent: whether any frames were sent
 * @flush: whether to update RS bit and the tail register
 *
 * Set the RS bit ("end of batch"), bump the tail, and queue the cleanup timer.
 * To be called after a NAPI polling loop, at the end of .ndo_xdp_xmit() etc.
 */
static inline void idpf_xdp_tx_finalize(void *_xdpsq, bool sent, bool flush)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;

	if ((!flush || unlikely(!sent)) &&
	    likely(xdpsq->desc_count - 1 != xdpsq->pending))
		return;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	idpf_xdpsq_set_rs(xdpsq);
	idpf_xdpsq_update_tail(xdpsq);

	libeth_xdpsq_queue_timer(xdpsq->timer);

	libeth_xdpsq_unlock(&xdpsq->xdp_lock);
}

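/*
 * Illustrative ordering on the Tx hot path (a sketch only: "bq", "flags" and
 * "xdpsq" are placeholders, see the kernel-doc above for when to call
 * idpf_xdp_tx_finalize()):
 *
 *	sent = idpf_xdp_tx_flush_bulk(bq, flags);
 *	idpf_xdp_tx_finalize(xdpsq, sent, true);
 *
 * i.e. flush the pending software bulk first, then set RS and ring the
 * doorbell once per batch instead of once per frame.
 */
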
/**
 * struct idpf_xdp_rx_desc - XDP-oriented view of the Rx completion descriptor
 * @qw0: quadword 0: buffer queue ID, generation bit, packet length, ptype
 * @qw1: quadword 1: buffer ID, end-of-packet bit
 * @qw2: quadword 2: Rx hash
 * @qw3: quadword 3: no fields are accessed from it here
 *
 * Same size as &virtchnl2_rx_flex_desc_adv_nic_3, but carries only the fields
 * needed on the XDP hot path, repacked by the idpf_xdp_get_qw*() helpers
 * below so that the IDPF_XDP_RX_* masks can be applied with FIELD_GET().
 */
struct idpf_xdp_rx_desc {
	aligned_u64		qw0;
#define IDPF_XDP_RX_BUFQ	BIT_ULL(47)
#define IDPF_XDP_RX_GEN		BIT_ULL(46)
#define IDPF_XDP_RX_LEN		GENMASK_ULL(45, 32)
#define IDPF_XDP_RX_PT		GENMASK_ULL(25, 16)

	aligned_u64		qw1;
#define IDPF_XDP_RX_BUF		GENMASK_ULL(47, 32)
#define IDPF_XDP_RX_EOP		BIT_ULL(1)

	aligned_u64		qw2;
#define IDPF_XDP_RX_HASH	GENMASK_ULL(31, 0)

	aligned_u64		qw3;
} __aligned(4 * sizeof(u64));
static_assert(sizeof(struct idpf_xdp_rx_desc) ==
	      sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));

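/*
 * Note: the size check above is what allows the __LIBETH_WORD_ACCESS paths in
 * idpf_xdp_get_qw*() below to reinterpret the HW descriptor as
 * &struct idpf_xdp_rx_desc and copy whole quadwords at once.
 */
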
#define idpf_xdp_rx_bufq(desc)	!!((desc)->qw0 & IDPF_XDP_RX_BUFQ)
#define idpf_xdp_rx_gen(desc)	!!((desc)->qw0 & IDPF_XDP_RX_GEN)
#define idpf_xdp_rx_len(desc)	FIELD_GET(IDPF_XDP_RX_LEN, (desc)->qw0)
#define idpf_xdp_rx_pt(desc)	FIELD_GET(IDPF_XDP_RX_PT, (desc)->qw0)
#define idpf_xdp_rx_buf(desc)	FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
#define idpf_xdp_rx_eop(desc)	!!((desc)->qw1 & IDPF_XDP_RX_EOP)
#define idpf_xdp_rx_hash(desc)	FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)

/*
 * Converters from the LE HW descriptor to the layout above: a single 64-bit
 * copy per quadword when __LIBETH_WORD_ACCESS is available, field-by-field
 * extraction otherwise.
 */
static inline void
idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw0 = ((const typeof(desc))rxd)->qw0;
#else
	desc->qw0 = ((u64)le16_to_cpu(rxd->pktlen_gen_bufq_id) << 32) |
		    ((u64)le16_to_cpu(rxd->ptype_err_fflags0) << 16);
#endif
}

static inline void
idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw1 = ((const typeof(desc))rxd)->qw1;
#else
	desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
		    rxd->status_err0_qw1;
#endif
}

static inline void
idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw2 = ((const typeof(desc))rxd)->qw2;
#else
	desc->qw2 = ((u64)rxd->hash3 << 24) |
		    ((u64)rxd->ff2_mirrid_hash2.hash2 << 16) |
		    le16_to_cpu(rxd->hash1);
#endif
}

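/*
 * Illustrative Rx hot-path usage (a sketch only: "rxd", "expected_gen" and
 * the locals are placeholders, the real loop lives in the Rx polling code):
 *
 *	struct idpf_xdp_rx_desc desc;
 *
 *	idpf_xdp_get_qw0(&desc, rxd);
 *	if (idpf_xdp_rx_gen(&desc) != expected_gen)
 *		break;
 *
 *	idpf_xdp_get_qw1(&desc, rxd);
 *	len = idpf_xdp_rx_len(&desc);
 *	buf_id = idpf_xdp_rx_buf(&desc);
 *	eop = idpf_xdp_rx_eop(&desc);
 */
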
void idpf_xdp_set_features(const struct idpf_vport *vport);

int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);

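/*
 * Illustrative wiring (a sketch only; the actual &net_device_ops instance is
 * defined elsewhere in the driver):
 *
 *	.ndo_bpf	= idpf_xdp,
 *	.ndo_xdp_xmit	= idpf_xdp_xmit,
 */
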
#endif /* _IDPF_XDP_H_ */