xref: /linux/drivers/net/ethernet/amazon/ena/ena_eth_com.c (revision 955abe0a1b41de5ba61fe4cd614ebc123084d499)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "ena_eth_com.h"

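/* Peek at the completion descriptor at the current CQ head.
 *
 * A descriptor is only considered valid when its phase bit matches the
 * phase currently expected by the driver; the expected phase toggles on
 * every queue wrap-around, so stale descriptors from the previous pass
 * are rejected. Returns the descriptor, or NULL if nothing new is ready.
 */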
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

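/* Flush one completed bounce buffer (a full LLQ descriptor-list entry) to
 * the device memory mapped at pbuf_dev_addr, using 64-bit copies, and
 * advance the SQ tail. When the device advertises a max TX burst, the
 * remaining burst budget is consumed here as well.
 */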
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
						     u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
			   io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
			 (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

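/* Copy the packet header into the current bounce buffer, right after the
 * descriptors that precede it (descs_num_before_header). Only relevant
 * for LLQ; for host placement this is a no-op. Fails if the header would
 * overflow the LLQ entry or no bounce buffer is available.
 */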
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
						 u8 *header_src,
						 u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

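/* Flush a partially filled bounce buffer at the end of a packet (LLQ only)
 * and start a fresh, zeroed one, resetting the per-line descriptor
 * accounting.
 */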
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

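/* LLQ variant of the tail update: once the current descriptor line is
 * full, write it to the device and start a new zeroed bounce buffer,
 * recomputing how many descriptors fit in the next line according to the
 * stride control.
 */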
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
			llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}

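/* Gather the RX completion descriptors of a single packet, consuming them
 * from the CQ head until a descriptor with the LAST bit is seen. On a
 * full packet, *first_cdesc_idx is set to the index where the packet
 * started and *num_descs to the total count; if the packet is not fully
 * completed yet, the partial count is cached in the CQ and *num_descs is
 * set to 0.
 */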
static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx,
				    u16 *num_descs)
{
	u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u32 last = 0;

	do {
		u32 status;

		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;
		status = READ_ONCE(cdesc->status);

		ena_com_cq_inc_head(io_cq);
		if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
		    ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
			struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);

			netdev_err(dev->net_device,
				   "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
				   count, io_cq->qid, cdesc->req_id);
			return -EFAULT;
		}
		count++;
		last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		*num_descs = count;
		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			   io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count = count;
		*num_descs = 0;
	}

	return 0;
}

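/* Build a TX meta descriptor from ena_meta and push it to the SQ. Note
 * that the MSS is split across two fields: bits 0-9 go into word2 and
 * bits 10-13 into len_ctrl.
 */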
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return -EFAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When meta caching is disabled, don't bother saving the meta and
	 * comparing it to the stored version; just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return 0;
}

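/* Translate the status word of the last RX completion descriptor into the
 * fields of ena_com_rx_ctx (L3/L4 protocol indices, checksum results,
 * hash and fragmentation flag).
 */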
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
		   ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

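/* Prepare a TX packet on the SQ. The descriptor chain is, in order: an
 * optional meta descriptor, a first descriptor carrying the header length
 * and the first buffer, and one further descriptor per remaining buffer.
 * For LLQ, the pushed header is copied into the same bounce-buffer line.
 * On return, *nb_hw_desc holds the number of hardware descriptors used.
 */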
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs + 1 for a potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Header size is too large %d max header: %d\n", header_len,
			   io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Push header wasn't provided in LLQ mode\n");
		return -EINVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller has nothing to send (no buffers and no header) */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set the first-desc bit when there is no meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first buffer shares its descriptor with the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
					   "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

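/* Fetch one received packet from the CQ: collect its completion
 * descriptors, copy the length and req_id of each buffer into ena_rx_ctx,
 * advance the matching SQ's next_to_comp, and extract the RX flags from
 * the last descriptor. ena_rx_ctx->descs is 0 when no complete packet is
 * pending.
 */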
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;
	int rc;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
	if (unlikely(rc != 0))
		return -EFAULT;

	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return -EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
		   io_sq->next_to_comp);

	/* Get rx flags from the last cdesc of the packet */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

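/* Post a single RX buffer to the RX SQ: the descriptor is marked as both
 * first and last, tagged with req_id so the completion can be matched back
 * to the buffer, and the tail is advanced.
 */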
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
		   req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

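/* Check whether the CQ has any new completion ready at its current head. */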
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);

	return !cdesc;
}