/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#if defined(__cplusplus)
extern "C" {
#endif
#include "ena_com.h"

/* We allow 2 DMA descriptors per LLQ entry */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE	(2 * sizeof(struct ena_eth_io_tx_desc))
#define ENA_LLQ_HEADER		(128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
#define ENA_LLQ_LARGE_HEADER	(256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
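
/* Assuming the 16-byte struct ena_eth_io_tx_desc layout, the descriptor
 * chunk reserves 32 bytes per entry, leaving 96 bytes of pushed-header
 * space in a 128-byte LLQ entry and 224 bytes in a 256-byte large entry.
 */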

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For a regular queue, indicates the size of the packet header.
	 * For LLQ, indicates the size of the pushed buffer.
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	u16 max_bufs;
	u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

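/*
 * A minimal sketch of TX submission via ena_com_prepare_tx(), compiled out
 * by default: ENA_ETH_COM_EXAMPLES and example_submit_tx() are hypothetical
 * and not part of the ena-com API. Real drivers also track req_id-to-buffer
 * mappings and handle ENA_COM_NO_MEM when the queue is full.
 */
#if defined(ENA_ETH_COM_EXAMPLES)
static inline int
example_submit_tx(struct ena_com_io_sq *io_sq, struct ena_com_buf *ena_buf,
		  u16 req_id)
{
	struct ena_com_tx_ctx tx_ctx;
	int nb_hw_desc = 0;

	memset(&tx_ctx, 0, sizeof(tx_ctx));
	tx_ctx.ena_bufs = ena_buf;
	tx_ctx.num_bufs = 1;
	tx_ctx.req_id = req_id;

	/* On success, nb_hw_desc reports how many HW descriptors were
	 * consumed, which the caller needs for completion accounting.
	 */
	return ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
}
#endif
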
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

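/*
 * A minimal sketch of RX polling via ena_com_rx_pkt(), compiled out by
 * default: ENA_ETH_COM_EXAMPLES, EXAMPLE_RX_BUFS and example_poll_rx() are
 * hypothetical. On success, rx_ctx.descs is 0 when no packet is ready;
 * otherwise it holds the number of buffers the packet spans.
 */
#if defined(ENA_ETH_COM_EXAMPLES)
#define EXAMPLE_RX_BUFS 8
static inline int
example_poll_rx(struct ena_com_io_cq *io_cq, struct ena_com_io_sq *io_sq)
{
	struct ena_com_rx_buf_info bufs[EXAMPLE_RX_BUFS];
	struct ena_com_rx_ctx rx_ctx;
	int rc;

	memset(&rx_ctx, 0, sizeof(rx_ctx));
	rx_ctx.ena_bufs = bufs;
	rx_ctx.max_bufs = EXAMPLE_RX_BUFS;

	rc = ena_com_rx_pkt(io_cq, io_sq, &rx_ctx);
	if (unlikely(rc))
		return rc;

	return (int)rx_ctx.descs;
}
#endif
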
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

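/* Unmask the IO CQ interrupt by writing the prepared interrupt control
 * value to the queue's unmask register.
 */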
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}

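/* Return the number of SQ entries still available to the driver. The u16
 * subtraction handles tail/next_to_comp wraparound naturally; one entry is
 * kept in reserve so a full queue can be distinguished from an empty one.
 */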
static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce the
	 * overhead, just pad the required entry count by 2 (one entry for
	 * the header line and one to compensate for the division rounding
	 * down).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}

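/* Return true if this packet's metadata differs from the cached TX
 * metadata, i.e. a new meta descriptor must be sent to the device.
 */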
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

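/* Return true if this SQ is an LLQ with a bounded TX burst size */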
static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

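/* Return true if submitting this packet would exceed the remaining LLQ TX
 * burst budget, in which case the doorbell must be written before the
 * packet is prepared. The entry count includes an extra meta descriptor
 * when meta caching is disabled or the metadata changed.
 */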
static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Queue: %d num_descs: %d num_entries_needed: %d\n",
		    io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

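/* Write the SQ tail to the doorbell register, making all prepared
 * descriptors visible to the device. For LLQs with a bounded burst size,
 * also replenish the TX burst budget.
 */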
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Write submission queue doorbell for queue: %d tail: %d\n",
		    io_sq->qid, tail);

	ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Reset available entries in tx burst for queue %d to %d\n",
			    io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

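/*
 * A sketch (compiled out by default) of how the two helpers above cooperate
 * on the transmit path; ENA_ETH_COM_EXAMPLES and example_xmit() are
 * hypothetical. The early doorbell fires only when the packet would not fit
 * in the remaining LLQ TX burst budget; drivers normally batch the final
 * doorbell over several packets.
 */
#if defined(ENA_ETH_COM_EXAMPLES)
static inline int
example_xmit(struct ena_com_io_sq *io_sq, struct ena_com_tx_ctx *tx_ctx)
{
	int nb_hw_desc = 0;
	int rc;

	/* Flush pending descriptors first if this packet would exceed
	 * the remaining burst budget.
	 */
	if (ena_com_is_doorbell_needed(io_sq, tx_ctx))
		ena_com_write_sq_doorbell(io_sq);

	rc = ena_com_prepare_tx(io_sq, tx_ctx, &nb_hw_desc);
	if (unlikely(rc))
		return rc;

	ena_com_write_sq_doorbell(io_sq);
	return 0;
}
#endif

/* Advertise the CQ's NUMA node to the device; no-op when the queue has no
 * NUMA configuration register.
 */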
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

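/* Acknowledge elem completed SQ entries; their slots become reusable */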
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

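/* Advance the CQ head and flip the expected phase bit on wraparound */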
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

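/* Fetch the req_id of the next completed TX descriptor. Returns 0 on
 * success, ENA_COM_TRY_AGAIN if no new completion is available, and
 * ENA_COM_FAULT/ENA_COM_INVAL on a corrupted or out-of-range descriptor.
 */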
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;
	u8 flags;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	flags = READ_ONCE8(cdesc->flags);

	/* When the current completion descriptor's phase doesn't match the
	 * expected one, the device has not yet updated this completion.
	 */
	cdesc_phase = flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	if (unlikely((flags & ENA_ETH_IO_TX_CDESC_MBZ6_MASK) &&
		      ena_com_get_cap(dev, ENA_ADMIN_CDESC_MBZ))) {
		ena_trc_err(dev,
			    "Corrupted TX descriptor on q_id: %d, req_id: %u\n",
			    io_cq->qid, cdesc->req_id);
		return ENA_COM_FAULT;
	}

	dma_rmb();

	*req_id = READ_ONCE16(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		ena_trc_err(dev, "Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

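/*
 * A sketch (compiled out by default) of a TX completion reap loop built on
 * ena_com_tx_comp_req_id_get() and ena_com_comp_ack(). handle_tx_done(),
 * ENA_ETH_COM_EXAMPLES and the one-descriptor-per-packet assumption are
 * hypothetical; real drivers acknowledge the per-packet descriptor counts
 * reported by ena_com_prepare_tx().
 */
#if defined(ENA_ETH_COM_EXAMPLES)
static inline int
example_reap_tx(struct ena_com_io_cq *io_cq, struct ena_com_io_sq *io_sq,
		void (*handle_tx_done)(u16 req_id))
{
	u16 req_id;
	u16 reaped = 0;

	/* Drain completions until the device stops producing them */
	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
		handle_tx_done(req_id);
		reaped++;
	}

	/* Return the reclaimed entries to the SQ (assumes one descriptor
	 * per completed packet).
	 */
	ena_com_comp_ack(io_sq, reaped);
	return (int)reaped;
}
#endif
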
#if defined(__cplusplus)
}
#endif
#endif /* ENA_ETH_COM_H_ */