xref: /freebsd/sys/contrib/ena-com/ena_eth_com.c (revision f5f8d7c9cdf0341f7c5fdb3a7c3358ec0ed67a0c)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_eth_com.h"

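/* Return a pointer to the completion descriptor at the CQ head if its phase
 * bit matches the phase the driver currently expects, or NULL if the device
 * has not posted a new completion yet.
 */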
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

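/* Return the host-memory SQ descriptor slot at the current (masked) tail. */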
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

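/* Copy a completed bounce-buffer line into the LLQ (device-memory) slot that
 * corresponds to the SQ tail, then advance the tail and flip the phase bit on
 * wrap-around. Fails with ENA_COM_NO_SPACE if the TX burst budget is exhausted.
 */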
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Error: trying to send more packets than tx burst allows\n");
			return ENA_COM_NO_SPACE;
		}

		io_sq->entries_in_tx_burst_left--;
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			    io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->bus,
				io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

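/* Copy the packet header into the current bounce buffer, right after the
 * slots reserved for the descriptors. No-op for host-memory queues.
 */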
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Trying to write header larger than llq entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

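/* Hand out the next descriptor slot inside the current bounce-buffer line
 * and account for it in the per-line bookkeeping.
 */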
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

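/* Flush a partially filled bounce buffer to the device and start a fresh
 * one, resetting the per-line counters. No-op for host-memory queues.
 */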
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return ENA_COM_OK;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return ENA_COM_OK;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

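/* Advance the LLQ tail: once the current bounce-buffer line has no descriptor
 * slots left, write it to the device and set up the next line.
 */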
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
			llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return ENA_COM_OK;
}

static int ena_com_sq_update_regular_queue_tail(struct ena_com_io_sq *io_sq)
{
	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	return ena_com_sq_update_regular_queue_tail(io_sq);
}

static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}

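/* Walk the completion descriptors that belong to a single RX packet, starting
 * at the CQ head, until one with the LAST bit set is found. Validates that
 * only the first descriptor carries the FIRST bit and that the must-be-zero
 * bits are clear when the device advertises that capability. For a complete
 * packet, returns its first cdesc index and descriptor count; for a partial
 * packet, saves the running count and reports zero descriptors.
 */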
static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx,
				    u16 *num_descs)
{
	struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
	u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u32 last = 0;

	do {
		u32 status;

		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;
		status = READ_ONCE32(cdesc->status);

		ena_com_cq_inc_head(io_cq);
		if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
		    ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
			ena_trc_err(dev,
				    "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
				    count, io_cq->qid, cdesc->req_id);
			return ENA_COM_FAULT;
		}

		if (unlikely((status & (ENA_ETH_IO_RX_CDESC_BASE_MBZ7_MASK |
					ENA_ETH_IO_RX_CDESC_BASE_MBZ17_MASK)) &&
			      ena_com_get_cap(dev, ENA_ADMIN_CDESC_MBZ))) {
			ena_trc_err(dev,
				    "Corrupted RX descriptor #%d on q_id: %d, req_id: %u\n",
				    count, io_cq->qid, cdesc->req_id);
			return ENA_COM_FAULT;
		}

		count++;
		last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		*num_descs = count;
		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
			    "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count = count;
		*num_descs = 0;
	}

	return ENA_COM_OK;
}

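/* Build and submit a TX meta descriptor. The MSS value is split across the
 * descriptor: bits 0-9 go into word2 and bits 10-13 into len_ctrl.
 */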
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return ENA_COM_FAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

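/* Create a TX meta descriptor only when needed: always when meta caching is
 * disabled, otherwise only when the meta differs from the cached copy (which
 * is then refreshed). *have_meta reports whether a descriptor was created.
 */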
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return ENA_COM_OK;
}

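/* Decode the completion descriptor status word into the RX context:
 * L3/L4 protocol indices, checksum results, hash and fragment flag.
 */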
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

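/* Queue a TX packet described by ena_tx_ctx on the SQ: write the push header
 * (LLQ only), emit a meta descriptor if required, then one data descriptor
 * per buffer, marking the first and last descriptors. Returns the number of
 * hardware descriptors consumed via nb_hw_desc.
 *
 * A minimal caller sketch (hypothetical driver code; buffer and req_id setup
 * assumed, doorbell helper declared in ena_eth_com.h):
 *
 *	struct ena_com_tx_ctx ctx = {};
 *	int nb_hw_desc, rc;
 *
 *	ctx.ena_bufs = bufs;		// DMA-mapped fragments
 *	ctx.num_bufs = num_frags;
 *	ctx.req_id = req_id;
 *	rc = ena_com_prepare_tx(io_sq, &ctx, &nb_hw_desc);
 *	if (rc == ENA_COM_OK)
 *		ena_com_write_sq_doorbell(io_sq);
 */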
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	/* num_bufs + 1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
		     && !buffer_to_push)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Push header wasn't provided in LLQ mode\n");
		return ENA_COM_INVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (unlikely(rc))
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first buffer shares its descriptor with the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
					    "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

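/* Retrieve one received packet from the CQ: collect its completion
 * descriptors, copy length/req_id per buffer into ena_rx_ctx, advance the SQ
 * consumer index and decode the status flags from the last descriptor.
 * ena_rx_ctx->descs is zero when no complete packet is available.
 */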
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;
	int rc;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_cq_to_ena_dev(io_cq), "wrong Q type");

	rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
	if (unlikely(rc != ENA_COM_OK))
		return ENA_COM_FAULT;

	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "Fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return ENA_COM_EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

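/* Post a single RX buffer to the SQ: fill a descriptor with the buffer
 * address, length, req_id and the current phase, then advance the tail.
 *
 * Typically called in a refill loop (hypothetical driver sketch; the caller
 * rings the doorbell once after posting a batch):
 *
 *	for (i = 0; i < num_free; i++)
 *		if (ena_com_add_single_rx_desc(io_sq, &bufs[i], ids[i]))
 *			break;
 *	ena_com_write_sq_doorbell(io_sq);
 */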
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;

	/* virt_addr allocation success is checked before calling this function */
	desc = get_sq_desc_regular_queue(io_sq);

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
		    __func__, io_sq->qid, req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_regular_queue_tail(io_sq);
}

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}
711