xref: /linux/drivers/net/ethernet/intel/idpf/xdp.h (revision a34b0e4e21d6be3c3d620aa7f9dfbf0e9550c19e)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (C) 2025 Intel Corporation */
3 
4 #ifndef _IDPF_XDP_H_
5 #define _IDPF_XDP_H_
6 
7 #include <net/libeth/xdp.h>
8 
9 #include "idpf_txrx.h"
10 
/* Rx queue XDP info registration with the core stack: per-queue and
 * whole-resource-set variants, plus propagating a new prog to all Rx queues.
 */
int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc);
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc);
void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc,
			       struct bpf_prog *xdp_prog);

/* XDP Tx queue acquisition/release for a vport */
int idpf_xdpsqs_get(const struct idpf_vport *vport);
void idpf_xdpsqs_put(const struct idpf_vport *vport);

/* XDPSQ completion polling and Tx bulk flushing */
u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget);
bool idpf_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags);
23 
/**
 * idpf_xdp_tx_xmit - produce a single HW Tx descriptor out of XDP desc
 * @desc: XDP descriptor to pull the DMA address and length from
 * @i: descriptor index on the queue to fill
 * @sq: XDP queue to produce the HW Tx descriptor on
 * @priv: &xsk_tx_metadata_ops on XSk xmit or %NULL
 */
static inline void idpf_xdp_tx_xmit(struct libeth_xdp_tx_desc desc, u32 i,
				    const struct libeth_xdpsq *sq, u64 priv)
{
	struct idpf_flex_tx_desc *tx_desc = sq->descs;
	u32 cmd;

	/* Data descriptor with both L2TAG slots; tags themselves are unused */
	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_DTYPE_M,
			 IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2);
	/* EOP only on the final fragment of a frame */
	if (desc.flags & LIBETH_XDP_TX_LAST)
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_DESC_CMD_EOP);
	/* Checksum offload is requested only on XSk xmit (@priv non-NULL) */
	if (priv && (desc.flags & LIBETH_XDP_TX_CSUM))
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_FLEX_DESC_CMD_CS_EN);

	tx_desc = &tx_desc[i];
	tx_desc->buf_addr = cpu_to_le64(desc.addr);
#ifdef __LIBETH_WORD_ACCESS
	/* Single 64-bit store: buf_size lives in the top 16 bits of qw1,
	 * cmd_dtype in the bottom 16; matches the field writes below.
	 */
	*(u64 *)&tx_desc->qw1 = ((u64)desc.len << 48) | cmd;
#else
	tx_desc->qw1.buf_size = cpu_to_le16(desc.len);
	tx_desc->qw1.cmd_dtype = cpu_to_le16(cmd);
#endif
}
55 
/**
 * idpf_xdpsq_set_rs - request a completion report for the current batch
 * @xdpsq: XDP Tx queue to set the RS bit on
 *
 * ORs the "Report Status" command bit into the most recently filled
 * descriptor, i.e. the one just before ::next_to_use (wrapping to the
 * last ring entry when next_to_use is 0).
 */
static inline void idpf_xdpsq_set_rs(const struct idpf_tx_queue *xdpsq)
{
	u32 ntu, cmd;

	ntu = xdpsq->next_to_use;
	if (unlikely(!ntu))
		ntu = xdpsq->desc_count;

	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M, IDPF_TX_DESC_CMD_RS);
#ifdef __LIBETH_WORD_ACCESS
	/* cmd occupies the low word of qw1, so a plain 64-bit OR suffices */
	*(u64 *)&xdpsq->flex_tx[ntu - 1].q.qw1 |= cmd;
#else
	xdpsq->flex_tx[ntu - 1].q.qw1.cmd_dtype |= cpu_to_le16(cmd);
#endif
}
71 
/**
 * idpf_xdpsq_update_tail - ring the XDPSQ doorbell
 * @xdpsq: XDP Tx queue to notify the HW about
 *
 * dma_wmb() orders the descriptor stores before the tail write, so the
 * doorbell itself may use a relaxed MMIO write.
 */
static inline void idpf_xdpsq_update_tail(const struct idpf_tx_queue *xdpsq)
{
	dma_wmb();
	writel_relaxed(xdpsq->next_to_use, xdpsq->tail);
}
77 
78 /**
79  * idpf_xdp_tx_finalize - finalize sending over XDPSQ
80  * @_xdpsq: XDP Tx queue
81  * @sent: whether any frames were sent
82  * @flush: whether to update RS bit and the tail register
83  *
84  * Set the RS bit ("end of batch"), bump the tail, and queue the cleanup timer.
85  * To be called after a NAPI polling loop, at the end of .ndo_xdp_xmit() etc.
86  */
87 static inline void idpf_xdp_tx_finalize(void *_xdpsq, bool sent, bool flush)
88 {
89 	struct idpf_tx_queue *xdpsq = _xdpsq;
90 
91 	if ((!flush || unlikely(!sent)) &&
92 	    likely(xdpsq->desc_count - 1 != xdpsq->pending))
93 		return;
94 
95 	libeth_xdpsq_lock(&xdpsq->xdp_lock);
96 
97 	idpf_xdpsq_set_rs(xdpsq);
98 	idpf_xdpsq_update_tail(xdpsq);
99 
100 	libeth_xdpsq_queue_timer(xdpsq->timer);
101 
102 	libeth_xdpsq_unlock(&xdpsq->xdp_lock);
103 }
104 
/**
 * struct idpf_xdp_rx_desc - host-endian view of a flex Rx descriptor
 * @qw0: buffer queue ID, generation bit, packet length, packet type
 * @qw1: buffer ID, EOP bit, low 8 timestamp bits
 * @qw2: packet hash
 * @qw3: high 32 timestamp bits
 *
 * Repack of &virtchnl2_rx_flex_desc_adv_nic_3 into four u64s; the
 * static_assert below guarantees the two have the same size, so the
 * word-access readers can alias one onto the other.
 */
struct idpf_xdp_rx_desc {
	aligned_u64		qw0;
#define IDPF_XDP_RX_BUFQ	BIT_ULL(47)
#define IDPF_XDP_RX_GEN		BIT_ULL(46)
#define IDPF_XDP_RX_LEN		GENMASK_ULL(45, 32)
#define IDPF_XDP_RX_PT		GENMASK_ULL(25, 16)

	aligned_u64		qw1;
#define IDPF_XDP_RX_BUF		GENMASK_ULL(47, 32)
#define IDPF_XDP_RX_EOP		BIT_ULL(1)
#define IDPF_XDP_RX_TS_LOW	GENMASK_ULL(31, 24)

	aligned_u64		qw2;
#define IDPF_XDP_RX_HASH	GENMASK_ULL(31, 0)

	aligned_u64		qw3;
#define IDPF_XDP_RX_TS_HIGH	GENMASK_ULL(63, 32)
} __aligned(4 * sizeof(u64));
static_assert(sizeof(struct idpf_xdp_rx_desc) ==
	      sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
125 
/* Field extractors for &struct idpf_xdp_rx_desc using the IDPF_XDP_RX_*
 * masks defined above; flag accessors collapse to 0/1 via double negation.
 */
#define idpf_xdp_rx_bufq(desc)	!!((desc)->qw0 & IDPF_XDP_RX_BUFQ)
#define idpf_xdp_rx_gen(desc)	!!((desc)->qw0 & IDPF_XDP_RX_GEN)
#define idpf_xdp_rx_len(desc)	FIELD_GET(IDPF_XDP_RX_LEN, (desc)->qw0)
#define idpf_xdp_rx_pt(desc)	FIELD_GET(IDPF_XDP_RX_PT, (desc)->qw0)
#define idpf_xdp_rx_buf(desc)	FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
#define idpf_xdp_rx_eop(desc)	!!((desc)->qw1 & IDPF_XDP_RX_EOP)
#define idpf_xdp_rx_hash(desc)	FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
#define idpf_xdp_rx_ts_low(desc)	FIELD_GET(IDPF_XDP_RX_TS_LOW, (desc)->qw1)
#define idpf_xdp_rx_ts_high(desc)	FIELD_GET(IDPF_XDP_RX_TS_HIGH, (desc)->qw3)
135 
/**
 * idpf_xdp_get_qw0 - load the first quadword of an Rx descriptor
 * @desc: target host-endian descriptor
 * @rxd: source HW descriptor
 *
 * With word access, the quadword is copied as a single u64 (the layouts
 * alias); otherwise it is assembled field by field at the bit offsets the
 * qw0 IDPF_XDP_RX_* masks expect.
 */
static inline void
idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw0 = ((const typeof(desc))rxd)->qw0;
#else
	desc->qw0 = ((u64)le16_to_cpu(rxd->pktlen_gen_bufq_id) << 32) |
		    ((u64)le16_to_cpu(rxd->ptype_err_fflags0) << 16);
#endif
}
147 
/**
 * idpf_xdp_get_qw1 - load the second quadword of an Rx descriptor
 * @desc: target host-endian descriptor
 * @rxd: source HW descriptor
 *
 * Word-access path copies the quadword whole; the manual path places
 * buf_id, ts_low, fflags1 and the status bytes at the offsets the qw1
 * IDPF_XDP_RX_* masks expect.
 */
static inline void
idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw1 = ((const typeof(desc))rxd)->qw1;
#else
	desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
		    ((u64)rxd->ts_low << 24) |
		    ((u64)rxd->fflags1 << 16) |
		    ((u64)rxd->status_err1 << 8) |
		    rxd->status_err0_qw1;
#endif
}
162 
/**
 * idpf_xdp_get_qw2 - load the third quadword of an Rx descriptor
 * @desc: target host-endian descriptor
 * @rxd: source HW descriptor
 *
 * Assembles the 32-bit packet hash from its three scattered HW fields
 * (hash3 | hash2 | hash1) unless whole-word copy is available.
 */
static inline void
idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw2 = ((const typeof(desc))rxd)->qw2;
#else
	desc->qw2 = ((u64)rxd->hash3 << 24) |
		    ((u64)rxd->ff2_mirrid_hash2.hash2 << 16) |
		    le16_to_cpu(rxd->hash1);
#endif
}
175 
/**
 * idpf_xdp_get_qw3 - load the fourth quadword of an Rx descriptor
 * @desc: target host-endian descriptor
 * @rxd: source HW descriptor
 *
 * Places ts_high in the upper 32 bits (per IDPF_XDP_RX_TS_HIGH), with
 * fmd6 and l2tag1 below it, unless whole-word copy is available.
 */
static inline void
idpf_xdp_get_qw3(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw3 = ((const typeof(desc))rxd)->qw3;
#else
	desc->qw3 = ((u64)le32_to_cpu(rxd->ts_high) << 32) |
		    ((u64)le16_to_cpu(rxd->fmd6) << 16) |
		    le16_to_cpu(rxd->l2tag1);
#endif
}
188 
/* Advertise the vport's XDP capabilities to the netdev core */
void idpf_xdp_set_features(const struct idpf_vport *vport);

/* NOTE(review): signatures match the .ndo_bpf and .ndo_xdp_xmit netdev ops —
 * confirm against the driver's net_device_ops registration.
 */
int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);
194 
195 #endif /* _IDPF_XDP_H_ */
196