/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2025 Intel Corporation */

#ifndef _IDPF_XDP_H_
#define _IDPF_XDP_H_

#include <net/libeth/xdp.h>

#include "idpf_txrx.h"

int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
			       struct bpf_prog *xdp_prog);

int idpf_xdpsqs_get(const struct idpf_vport *vport);
void idpf_xdpsqs_put(const struct idpf_vport *vport);

u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget);
bool idpf_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags);

/**
 * idpf_xdp_tx_xmit - produce a single HW Tx descriptor out of XDP desc
 * @desc: XDP descriptor to pull the DMA address and length from
 * @i: descriptor index on the queue to fill
 * @sq: XDP queue to produce the HW Tx descriptor on
 * @priv: &xsk_tx_metadata_ops on XSk xmit or %NULL
 */
static inline void idpf_xdp_tx_xmit(struct libeth_xdp_tx_desc desc, u32 i,
				    const struct libeth_xdpsq *sq, u64 priv)
{
	struct idpf_flex_tx_desc *tx_desc = sq->descs;
	u32 cmd;

	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_DTYPE_M,
			 IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2);
	if (desc.flags & LIBETH_XDP_TX_LAST)
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_DESC_CMD_EOP);
	if (priv && (desc.flags & LIBETH_XDP_TX_CSUM))
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_FLEX_DESC_CMD_CS_EN);

	tx_desc = &tx_desc[i];
	tx_desc->buf_addr = cpu_to_le64(desc.addr);
#ifdef __LIBETH_WORD_ACCESS
	*(u64 *)&tx_desc->qw1 = ((u64)desc.len << 48) | cmd;
#else
	tx_desc->qw1.buf_size = cpu_to_le16(desc.len);
	tx_desc->qw1.cmd_dtype = cpu_to_le16(cmd);
#endif
}

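/**
 * idpf_xdpsq_set_rs - request a completion for the last filled descriptor
 * @xdpsq: XDP Tx queue to set the RS bit on
 *
 * Set the RS ("report status") bit on the most recently filled descriptor,
 * i.e. the one preceding next_to_use (with wraparound), so that HW writes
 * back a completion for the whole batch.
 */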
static inline void idpf_xdpsq_set_rs(const struct idpf_tx_queue *xdpsq)
{
	u32 ntu, cmd;

	ntu = xdpsq->next_to_use;
	if (unlikely(!ntu))
		ntu = xdpsq->desc_count;

	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M, IDPF_TX_DESC_CMD_RS);
#ifdef __LIBETH_WORD_ACCESS
	*(u64 *)&xdpsq->flex_tx[ntu - 1].q.qw1 |= cmd;
#else
	xdpsq->flex_tx[ntu - 1].q.qw1.cmd_dtype |= cpu_to_le16(cmd);
#endif
}

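/**
 * idpf_xdpsq_update_tail - bump the XDPSQ tail register
 * @xdpsq: XDP Tx queue to notify the HW for
 *
 * Post the filled descriptors to the device. The dma_wmb() makes sure all
 * descriptor writes are visible to HW before the tail is advanced.
 */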
static inline void idpf_xdpsq_update_tail(const struct idpf_tx_queue *xdpsq)
{
	dma_wmb();
	writel_relaxed(xdpsq->next_to_use, xdpsq->tail);
}

/**
 * idpf_xdp_tx_finalize - finalize sending over XDPSQ
 * @_xdpsq: XDP Tx queue
 * @sent: whether any frames were sent
 * @flush: whether to update the RS bit and the tail register
 *
 * Set the RS bit ("end of batch"), bump the tail, and queue the cleanup timer.
 * To be called after a NAPI polling loop, at the end of .ndo_xdp_xmit() etc.
 */
static inline void idpf_xdp_tx_finalize(void *_xdpsq, bool sent, bool flush)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;

	if ((!flush || unlikely(!sent)) &&
	    likely(xdpsq->desc_count - 1 != xdpsq->pending))
		return;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	idpf_xdpsq_set_rs(xdpsq);
	idpf_xdpsq_update_tail(xdpsq);

	libeth_xdpsq_queue_timer(xdpsq->timer);

	libeth_xdpsq_unlock(&xdpsq->xdp_lock);
}
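
/*
 * Illustrative sketch only, not part of the upstream header: how the helpers
 * above could fit together in a minimal send path. In the driver they are
 * meant to be plugged into the libeth_xdp core as callbacks, which also takes
 * care of DMA mapping, ring wraparound, locking and the XSk Tx metadata ops
 * passed via @priv. The function below and its name are hypothetical, and @sq
 * is assumed to describe the same descriptor ring as @xdpsq.
 */
static inline void idpf_xdp_tx_sketch(struct idpf_tx_queue *xdpsq,
				      const struct libeth_xdpsq *sq,
				      const struct libeth_xdp_tx_desc *descs,
				      u32 n)
{
	u32 ntu = xdpsq->next_to_use;
	u32 i;

	/* one HW descriptor per software descriptor, %0 priv (no XSk
	 * metadata), no wraparound handling in this sketch
	 */
	for (i = 0; i < n; i++)
		idpf_xdp_tx_xmit(descs[i], ntu + i, sq, 0);

	xdpsq->next_to_use = ntu + n;

	/* request a completion and ring the doorbell */
	idpf_xdp_tx_finalize(xdpsq, n, true);
}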

struct idpf_xdp_rx_desc {
	aligned_u64		qw0;
#define IDPF_XDP_RX_BUFQ	BIT_ULL(47)
#define IDPF_XDP_RX_GEN		BIT_ULL(46)
#define IDPF_XDP_RX_LEN		GENMASK_ULL(45, 32)
#define IDPF_XDP_RX_PT		GENMASK_ULL(25, 16)

	aligned_u64		qw1;
#define IDPF_XDP_RX_BUF		GENMASK_ULL(47, 32)
#define IDPF_XDP_RX_EOP		BIT_ULL(1)

	aligned_u64		qw2;
#define IDPF_XDP_RX_HASH	GENMASK_ULL(31, 0)

	aligned_u64		qw3;
} __aligned(4 * sizeof(u64));
static_assert(sizeof(struct idpf_xdp_rx_desc) ==
	      sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));

#define idpf_xdp_rx_bufq(desc)	!!((desc)->qw0 & IDPF_XDP_RX_BUFQ)
#define idpf_xdp_rx_gen(desc)	!!((desc)->qw0 & IDPF_XDP_RX_GEN)
#define idpf_xdp_rx_len(desc)	FIELD_GET(IDPF_XDP_RX_LEN, (desc)->qw0)
#define idpf_xdp_rx_pt(desc)	FIELD_GET(IDPF_XDP_RX_PT, (desc)->qw0)
#define idpf_xdp_rx_buf(desc)	FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
#define idpf_xdp_rx_eop(desc)	!!((desc)->qw1 & IDPF_XDP_RX_EOP)
#define idpf_xdp_rx_hash(desc)	FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)

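/**
 * idpf_xdp_get_qw0 - read quad-word 0 of an Rx descriptor
 * @desc: XDP-friendly descriptor image to fill
 * @rxd: HW Rx descriptor to read from
 *
 * Grab the packet length, generation, buffer queue ID and packet type fields
 * in one 64-bit load when word access is available, field by field otherwise.
 */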
static inline void
idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw0 = ((const typeof(desc))rxd)->qw0;
#else
	desc->qw0 = ((u64)le16_to_cpu(rxd->pktlen_gen_bufq_id) << 32) |
		    ((u64)le16_to_cpu(rxd->ptype_err_fflags0) << 16);
#endif
}

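/**
 * idpf_xdp_get_qw1 - read quad-word 1 of an Rx descriptor
 * @desc: XDP-friendly descriptor image to fill
 * @rxd: HW Rx descriptor to read from
 *
 * Grab the buffer ID and the EOP status bit in one 64-bit load when word
 * access is available, field by field otherwise.
 */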
static inline void
idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw1 = ((const typeof(desc))rxd)->qw1;
#else
	desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
		    rxd->status_err0_qw1;
#endif
}

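/**
 * idpf_xdp_get_qw2 - read quad-word 2 of an Rx descriptor
 * @desc: XDP-friendly descriptor image to fill
 * @rxd: HW Rx descriptor to read from
 *
 * Grab the 32-bit Rx hash, assembled from the hash1/hash2/hash3 descriptor
 * fields when word access is not available.
 */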
static inline void
idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw2 = ((const typeof(desc))rxd)->qw2;
#else
	desc->qw2 = ((u64)rxd->hash3 << 24) |
		    ((u64)rxd->ff2_mirrid_hash2.hash2 << 16) |
		    le16_to_cpu(rxd->hash1);
#endif
}

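/*
 * Illustrative sketch only, not part of the upstream header: parsing a single
 * completed Rx descriptor with the helpers above. Generation checking,
 * multi-buffer chaining and buffer lookup are left to the real Rx routine;
 * the function name below is hypothetical.
 */
static inline u32
idpf_xdp_parse_rx_desc_sketch(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd,
			      bool *eop)
{
	struct idpf_xdp_rx_desc desc;

	idpf_xdp_get_qw0(&desc, rxd);
	idpf_xdp_get_qw1(&desc, rxd);

	/* last buffer of the frame? */
	*eop = idpf_xdp_rx_eop(&desc);

	/* the buffer ID (idpf_xdp_rx_buf()) and, after idpf_xdp_get_qw2(),
	 * the Rx hash (idpf_xdp_rx_hash()) are read the same way
	 */

	/* payload size in this buffer */
	return idpf_xdp_rx_len(&desc);
}
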
void idpf_xdp_set_features(const struct idpf_vport *vport);

int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);

#endif /* _IDPF_XDP_H_ */