xref: /linux/drivers/net/ethernet/amazon/ena/ena_eth_com.c (revision 962fad301c33dec69324dc2d9320fd84a119a24c)
/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_eth_com.h"

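/* Completion descriptors carry a phase bit that the device flips on each
 * wrap of the completion queue. An entry belongs to the current lap only
 * when its phase matches io_cq->phase, so stale entries from the previous
 * lap are rejected without ever having to zero the ring. The dma_rmb()
 * below keeps the rest of the descriptor from being read before the phase
 * bit has been checked.
 */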
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

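/* Flush one fully built descriptor line from the host-side bounce buffer
 * into the device's LLQ memory. The wmb() orders the bounce-buffer fills
 * against the MMIO copy, and __iowrite64_copy() moves the line in 64-bit
 * chunks (desc_list_entry_size is assumed to be a multiple of 8 here).
 * When the device advertises a max TX burst, each line written consumes
 * one burst credit.
 */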
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			pr_err("Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
			 io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

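/* In device placement (LLQ) mode the packet header is not DMA'd: it is
 * copied inline into the current bounce line, right after the slots
 * reserved for the first descs_num_before_header descriptors. Host
 * placement queues return early since they have no pushed header.
 */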
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) >
		     llq_info->desc_list_entry_size)) {
		pr_err("trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		pr_err("bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

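/* LLQ flavor of descriptor allocation: descriptors are carved out of the
 * current bounce line rather than out of a host ring, consuming one of
 * the line's remaining descriptor slots per call.
 */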
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		pr_err("bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

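/* Called at the end of a packet: if the current bounce line holds any
 * descriptors, flush it to the device and pull a fresh zeroed line from
 * the bounce-buffer pool so the next packet starts on a clean line.
 */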
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

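/* LLQ flavor of a tail bump. Descriptors reach the device a full line at
 * a time, so the line is flushed once descs_left_in_line drops to zero
 * and the per-line descriptor budget is recomputed from the stride
 * control: a single descriptor per entry, or as many as fit in the line.
 */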
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}

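/* Walk completion descriptors until the LAST bit is seen and return the
 * number of cdescs making up one received packet. If the device has not
 * posted the final cdesc yet, the partial count is parked in
 * cur_rx_pkt_cdesc_count and 0 is returned so the caller can retry; the
 * parked count is folded back in once LAST finally shows up.
 */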
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			 io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

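/* Build an extended TX meta descriptor carrying the TSO/checksum context.
 * Note how the MSS is split across two fields: bits 0-9 go into word2 via
 * MSS_LO and bits 10-13 into len_ctrl via MSS_HI. For example, an MSS of
 * 9000 (0x2328) would be split into MSS_LO = 0x328 and MSS_HI = 0x8.
 */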
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc;

	meta_desc = get_sq_desc(io_sq);
	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When meta caching is disabled, don't save the meta or compare it
	 * to the cached version; just create the meta descriptor.
	 */
	if (io_sq->disable_meta_caching) {
		if (unlikely(!ena_tx_ctx->meta_valid))
			return -EINVAL;

		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return 0;
}

static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash 0x%x frag %d cdesc_status 0x%x\n",
		 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

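/* Prepare one TX packet on the SQ. The flow, in broad strokes:
 *   1. check for num_bufs + 1 free descriptors (one for a possible meta
 *      desc);
 *   2. for LLQ, copy the pushed header into the bounce buffer;
 *   3. emit a meta descriptor if the offload context requires one;
 *   4. emit one TX descriptor per buffer, the first sharing its entry
 *      with the header;
 *   5. mark the last descriptor and flush any partially used bounce line.
 *
 * A rough caller sketch (hedged: field setup is illustrative and error
 * handling is elided; the doorbell helper lives in ena_eth_com.h):
 *
 *	struct ena_com_tx_ctx tx_ctx = {};
 *	int nb_hw_desc;
 *
 *	tx_ctx.ena_bufs = bufs;
 *	tx_ctx.num_bufs = num_bufs;
 *	rc = ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
 *	if (!rc)
 *		ena_com_write_sq_doorbell(io_sq);
 */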
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs + 1 for a potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		pr_debug("Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		pr_err("header size is too large %d max header: %d\n",
		       header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
		     !buffer_to_push))
		return -EINVAL;

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		pr_err("failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have a meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 of the req_id */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 of the req_id */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first buffer shares its descriptor with the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc))
				return rc;

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc))
		return rc;

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

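/* Reap one packet's worth of completion descriptors and translate them
 * into (len, req_id) pairs in ena_rx_ctx->ena_bufs. Returns 0 with
 * ena_rx_ctx->descs == 0 when no complete packet is pending. A rough
 * caller sketch (hedged: names are illustrative, error paths elided):
 *
 *	rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx);
 *	if (rc || !ena_rx_ctx.descs)
 *		break;
 *	(hand ena_rx_ctx.ena_bufs off to the skb-building code)
 */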
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		 nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
		       ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
		 io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

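/* Post a single RX buffer to the SQ. The descriptor carries FIRST and
 * LAST (each RX buffer is its own slot), the current phase bit, and
 * COMP_REQ so the device generates a completion for it. Callers
 * typically post a batch of these and then ring the doorbell once.
 */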
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		ENA_ETH_IO_RX_DESC_LAST_MASK |
		(io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
		ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	return !ena_com_get_next_rx_cdesc(io_cq);
}