/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2025 Intel Corporation */

#ifndef _IDPF_XDP_H_
#define _IDPF_XDP_H_

#include <net/libeth/xdp.h>

#include "idpf_txrx.h"

int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
			       struct bpf_prog *xdp_prog);

int idpf_xdpsqs_get(const struct idpf_vport *vport);
void idpf_xdpsqs_put(const struct idpf_vport *vport);

bool idpf_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags);

/**
 * idpf_xdp_tx_xmit - produce a single HW Tx descriptor out of an XDP descriptor
 * @desc: XDP descriptor to pull the DMA address and length from
 * @i: descriptor index on the queue to fill
 * @sq: XDP queue to produce the HW Tx descriptor on
 * @priv: &xsk_tx_metadata_ops on XSk xmit or %NULL
 */
static inline void idpf_xdp_tx_xmit(struct libeth_xdp_tx_desc desc, u32 i,
				    const struct libeth_xdpsq *sq, u64 priv)
{
	struct idpf_flex_tx_desc *tx_desc = sq->descs;
	u32 cmd;

	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_DTYPE_M,
			 IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2);
	if (desc.flags & LIBETH_XDP_TX_LAST)
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_DESC_CMD_EOP);
	if (priv && (desc.flags & LIBETH_XDP_TX_CSUM))
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_FLEX_DESC_CMD_CS_EN);

	tx_desc = &tx_desc[i];
	tx_desc->buf_addr = cpu_to_le64(desc.addr);
#ifdef __LIBETH_WORD_ACCESS
	/* One aligned 64-bit store fills the whole QW1: the buffer size in
	 * the top word, the command/dtype pair in the bottom, the unused
	 * fields in between zeroed.
	 */
	*(u64 *)&tx_desc->qw1 = ((u64)desc.len << 48) | cmd;
#else
	tx_desc->qw1.buf_size = cpu_to_le16(desc.len);
	tx_desc->qw1.cmd_dtype = cpu_to_le16(cmd);
#endif
}
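
/* Illustrative sketch, not part of this header: idpf_xdp_tx_xmit() is not
 * meant to be called directly, but passed as the per-descriptor callback to
 * the libeth_xdp bulk-flush machinery. Assuming the
 * LIBETH_XDP_DEFINE_FLUSH_TX() helper from <net/libeth/xdp.h> and a
 * driver-private idpf_xdp_tx_prep() callback, the body of
 * idpf_xdp_tx_flush_bulk() in xdp.c would be generated roughly as:
 *
 *	LIBETH_XDP_DEFINE_START();
 *	LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
 *				   idpf_xdp_tx_xmit);
 *	LIBETH_XDP_DEFINE_END();
 */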

/**
 * idpf_xdpsq_set_rs - set the RS bit on the last queued descriptor
 * @xdpsq: XDP Tx queue to operate on
 *
 * Request a "report status" completion for the most recently written
 * descriptor, so that HW signals when the whole batch has been sent.
 */
static inline void idpf_xdpsq_set_rs(const struct idpf_tx_queue *xdpsq)
{
	u32 ntu, cmd;

	/* If the ring has just wrapped, the last written descriptor is the
	 * final one of the ring.
	 */
	ntu = xdpsq->next_to_use;
	if (unlikely(!ntu))
		ntu = xdpsq->desc_count;

	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M, IDPF_TX_DESC_CMD_RS);
#ifdef __LIBETH_WORD_ACCESS
	*(u64 *)&xdpsq->flex_tx[ntu - 1].q.qw1 |= cmd;
#else
	xdpsq->flex_tx[ntu - 1].q.qw1.cmd_dtype |= cpu_to_le16(cmd);
#endif
}

/**
 * idpf_xdpsq_update_tail - bump the XDPSQ tail register
 * @xdpsq: XDP Tx queue to ring the doorbell for
 *
 * Notify the HW about the freshly written descriptors. The barrier makes
 * sure all of them are visible to the device before the tail is updated.
 */
static inline void idpf_xdpsq_update_tail(const struct idpf_tx_queue *xdpsq)
{
	/* Order the descriptor stores before the doorbell write */
	dma_wmb();
	writel_relaxed(xdpsq->next_to_use, xdpsq->tail);
}

/**
 * idpf_xdp_tx_finalize - finalize sending over XDPSQ
 * @_xdpsq: XDP Tx queue
 * @sent: whether any frames were sent
 * @flush: whether to update the RS bit and the tail register
 *
 * Set the RS bit ("end of batch"), bump the tail, and queue the cleanup timer.
 * To be called after a NAPI polling loop, at the end of .ndo_xdp_xmit() etc.
 */
static inline void idpf_xdp_tx_finalize(void *_xdpsq, bool sent, bool flush)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;

	/* Nothing to do unless a flush of actually sent frames was requested
	 * or the queue is about to overflow.
	 */
	if ((!flush || unlikely(!sent)) &&
	    likely(xdpsq->desc_count - 1 != xdpsq->pending))
		return;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	idpf_xdpsq_set_rs(xdpsq);
	idpf_xdpsq_update_tail(xdpsq);

	libeth_xdpsq_queue_timer(xdpsq->timer);

	libeth_xdpsq_unlock(&xdpsq->xdp_lock);
}
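
/* Illustrative sketch, not part of this header: idpf_xdp_tx_finalize() is
 * meant to be handed to the libeth_xdp helpers as the "finalize" callback.
 * Assuming the libeth_xdp_xmit_do_bulk() helper from <net/libeth/xdp.h>,
 * the tail of idpf_xdp_xmit() could look roughly like:
 *
 *	return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
 *				       &vport->txqs[vport->xdp_txq_offset],
 *				       vport->num_xdp_txq,
 *				       idpf_xdp_tx_flush_bulk,
 *				       idpf_xdp_tx_finalize);
 */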

/**
 * struct idpf_xdp_rx_desc - reduced view of an Rx flex descriptor
 * @qw0: quad word 0: buffer queue ID, generation bit, packet length, ptype
 * @qw1: quad word 1: buffer ID, end-of-packet bit
 * @qw2: quad word 2: RSS hash
 * @qw3: quad word 3: not used by the XDP hotpath
 *
 * Host-order mirror of &virtchnl2_rx_flex_desc_adv_nic_3 carrying only the
 * fields the XDP hotpath reads, filled by the idpf_xdp_get_qw*() helpers
 * below and parsed via the idpf_xdp_rx_*() accessors.
 */
struct idpf_xdp_rx_desc {
	aligned_u64		qw0;
#define IDPF_XDP_RX_BUFQ	BIT_ULL(47)
#define IDPF_XDP_RX_GEN		BIT_ULL(46)
#define IDPF_XDP_RX_LEN		GENMASK_ULL(45, 32)
#define IDPF_XDP_RX_PT		GENMASK_ULL(25, 16)

	aligned_u64		qw1;
#define IDPF_XDP_RX_BUF		GENMASK_ULL(47, 32)
#define IDPF_XDP_RX_EOP		BIT_ULL(1)

	aligned_u64		qw2;
#define IDPF_XDP_RX_HASH	GENMASK_ULL(31, 0)

	aligned_u64		qw3;
} __aligned(4 * sizeof(u64));
static_assert(sizeof(struct idpf_xdp_rx_desc) ==
	      sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));

#define idpf_xdp_rx_bufq(desc)	!!((desc)->qw0 & IDPF_XDP_RX_BUFQ)
#define idpf_xdp_rx_gen(desc)	!!((desc)->qw0 & IDPF_XDP_RX_GEN)
#define idpf_xdp_rx_len(desc)	FIELD_GET(IDPF_XDP_RX_LEN, (desc)->qw0)
#define idpf_xdp_rx_pt(desc)	FIELD_GET(IDPF_XDP_RX_PT, (desc)->qw0)
#define idpf_xdp_rx_buf(desc)	FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
#define idpf_xdp_rx_eop(desc)	!!((desc)->qw1 & IDPF_XDP_RX_EOP)
#define idpf_xdp_rx_hash(desc)	FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
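
/* Example (illustrative): the field definitions above are plain
 * FIELD_PREP()/FIELD_GET() masks over host-order quad words, so a value
 * packed into a descriptor round-trips through the matching accessor:
 *
 *	struct idpf_xdp_rx_desc desc = {
 *		.qw0	= FIELD_PREP(IDPF_XDP_RX_GEN, 1) |
 *			  FIELD_PREP(IDPF_XDP_RX_LEN, 1514),
 *	};
 *
 *	idpf_xdp_rx_gen(&desc);		// true
 *	idpf_xdp_rx_len(&desc);		// 1514
 */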

/**
 * idpf_xdp_get_qw0 - read the first quad word from an Rx descriptor
 * @desc: reduced descriptor to fill
 * @rxd: HW Rx descriptor to read from
 */
static inline void
idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	/* LE platforms: the reduced layout matches the HW one, plain copy */
	desc->qw0 = ((const typeof(desc))rxd)->qw0;
#else
	desc->qw0 = ((u64)le16_to_cpu(rxd->pktlen_gen_bufq_id) << 32) |
		    ((u64)le16_to_cpu(rxd->ptype_err_fflags0) << 16);
#endif
}

/**
 * idpf_xdp_get_qw1 - read the second quad word from an Rx descriptor
 * @desc: reduced descriptor to fill
 * @rxd: HW Rx descriptor to read from
 */
static inline void
idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw1 = ((const typeof(desc))rxd)->qw1;
#else
	desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
		    rxd->status_err0_qw1;
#endif
}

/**
 * idpf_xdp_get_qw2 - read the third quad word (RSS hash) from an Rx descriptor
 * @desc: reduced descriptor to fill
 * @rxd: HW Rx descriptor to read from
 */
static inline void
idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw2 = ((const typeof(desc))rxd)->qw2;
#else
	desc->qw2 = ((u64)rxd->hash3 << 24) |
		    ((u64)rxd->ff2_mirrid_hash2.hash2 << 16) |
		    le16_to_cpu(rxd->hash1);
#endif
}
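
/* Illustrative sketch, not part of this header: an Rx polling loop fills a
 * stack descriptor one quad word at a time, touching only what it needs.
 * Hypothetical locals: @rxd is the HW descriptor being polled, @gen is the
 * expected generation bit for the current ring pass.
 *
 *	struct idpf_xdp_rx_desc desc;
 *	u32 len;
 *
 *	idpf_xdp_get_qw0(&desc, rxd);
 *	if (idpf_xdp_rx_gen(&desc) != gen)
 *		break;				// HW hasn't written it yet
 *	len = idpf_xdp_rx_len(&desc);
 *
 *	idpf_xdp_get_qw1(&desc, rxd);
 *	if (!idpf_xdp_rx_eop(&desc))
 *		continue;			// more frags of the frame follow
 */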

void idpf_xdp_set_features(const struct idpf_vport *vport);

int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);

#endif /* _IDPF_XDP_H_ */