/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_eth_com.h"

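/* ena_com_get_next_rx_cdesc - peek at the completion descriptor at the
 * current CQ head.
 *
 * Ownership is decided by the phase bit: the device flips the phase it
 * writes on every wrap-around of the ring, so a descriptor belongs to the
 * driver only while its phase matches io_cq->phase. Returns NULL when the
 * device hasn't written the descriptor yet.
 */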
static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	return cdesc;
}

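/* ena_com_cq_inc_head - advance the CQ head.
 *
 * The head counter is free-running and masked by (q_depth - 1) on access;
 * when the masked value wraps back to zero, the expected phase bit is
 * flipped to match what the device will write on the next pass.
 */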
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

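/* get_sq_desc - return a pointer to the host-memory submission descriptor
 * at the current (masked) SQ tail.
 */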
static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

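/* ena_com_copy_curr_sq_desc_to_dev - for LLQ (device placement) queues,
 * mirror the descriptor just built in host memory into device memory at the
 * same ring offset. A no-op for host placement, where the device fetches
 * descriptors by DMA instead.
 */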
static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	u32 offset = tail_masked * io_sq->desc_entry_size;

	/* In case this queue isn't an LLQ */
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return;

	memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
		    io_sq->desc_addr.virt_addr + offset,
		    io_sq->desc_entry_size);
}

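/* ena_com_sq_update_tail - advance the SQ tail, flipping the phase bit on
 * wrap-around; the submission-side counterpart of ena_com_cq_inc_head().
 */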
static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;
}

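/* ena_com_write_header - for LLQ queues, push the packet header into the
 * per-descriptor slot of the device-memory header buffer. Returns 0 for
 * host placement (nothing to push) and ENA_COM_INVAL when the push-buffer
 * header pointer was never set up.
 */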
static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
				       u8 *head_src, u16 header_len)
{
	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	u8 __iomem *dev_head_addr =
		io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	if (unlikely(!io_sq->header_addr)) {
		ena_trc_err("Push buffer header ptr is NULL\n");
		return ENA_COM_INVAL;
	}

	memcpy_toio(dev_head_addr, head_src, header_len);

	return 0;
}

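/* ena_com_rx_cdesc_idx_to_ptr - translate a (possibly unmasked) completion
 * descriptor index into a pointer inside the CQ descriptor ring.
 */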
static inline struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}

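/* ena_com_cdesc_rx_pkt_get - count the completion descriptors that make up
 * the next received packet, starting from the CQ head.
 *
 * A packet can span several descriptors and can complete in pieces across
 * calls: partial progress is accumulated in cur_rx_pkt_cdesc_count and
 * cur_rx_pkt_cdesc_start_idx, and a non-zero count is returned only once
 * the descriptor carrying the LAST bit is seen.
 */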
static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

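/* ena_com_meta_desc_changed - return true when the TX offload meta data of
 * this context differs from the meta most recently written to the device,
 * in which case a fresh meta descriptor must precede the data descriptors.
 */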
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	int rc;

	if (ena_tx_ctx->meta_valid) {
		rc = memcmp(&io_sq->cached_tx_meta,
			    &ena_tx_ctx->ena_meta,
			    sizeof(struct ena_com_tx_meta));

		if (unlikely(rc != 0))
			return true;
	}

	return false;
}

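/* ena_com_create_and_store_tx_meta_desc - build an extended TX meta
 * descriptor (MSS, L3/L4 header lengths and offsets) at the SQ tail, cache
 * it for the next ena_com_meta_desc_changed() comparison, and hand the
 * descriptor to the device.
 */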
static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
							 struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	meta_desc = get_sq_desc(io_sq);
	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	/* Cache the meta desc */
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	ena_com_copy_curr_sq_desc_to_dev(io_sq);
	ena_com_sq_update_tail(io_sq);
}

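/* ena_com_rx_set_flags - unpack the per-packet RX offload results (L3/L4
 * protocol indices, checksum errors, hash and the IPv4 fragment bit) from
 * the completion descriptor status into the caller's RX context.
 */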
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
					struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
	ena_rx_ctx->l4_csum_err =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %u frag: %d cdesc_status: %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

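/* ena_com_prepare_tx - build all submission descriptors for one TX packet:
 * push the header for LLQ queues, queue a meta descriptor if the offload
 * meta data changed, then one data descriptor per buffer (the first buffer
 * reuses the descriptor that carries the header length and FIRST bit). On
 * success, *nb_hw_desc holds the number of hardware descriptors consumed.
 *
 * Minimal caller sketch (illustrative only; the buffer array, req_id and
 * queue handles are assumed to come from the surrounding driver):
 *
 *	struct ena_com_tx_ctx ctx = {0};
 *	int nb_hw_desc, rc;
 *
 *	ctx.ena_bufs = bufs;		// caller-owned ena_com_buf array
 *	ctx.num_bufs = num_bufs;
 *	ctx.req_id = req_id;		// echoed back on TX completion
 *	rc = ena_com_prepare_tx(io_sq, &ctx, &nb_hw_desc);
 *	if (rc == 0)
 *		ena_com_write_sq_doorbell(io_sq);
 */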
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *push_header = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	int total_desc, i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 "wrong Q type");

	/* num_bufs + 1 for a potential meta desc */
	if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
		ena_trc_err("Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err("header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	/* start with pushing the header (if needed) */
	rc = ena_com_write_header(io_sq, push_header, header_len);
	if (unlikely(rc))
		return rc;

	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta)
		ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		/* Only the meta descriptor (if any) consumed a HW desc */
		*nb_hw_desc = have_meta ? 1 : 0;
		return 0;
	}

	desc = get_sq_desc(io_sq);
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first buffer shares its desc with the header */
		if (likely(i != 0)) {
			ena_com_copy_curr_sq_desc_to_dev(io_sq);
			ena_com_sq_update_tail(io_sq);

			desc = get_sq_desc(io_sq);
			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	ena_com_copy_curr_sq_desc_to_dev(io_sq);

	ena_com_sq_update_tail(io_sq);

	total_desc = ENA_MAX16(num_bufs, 1);
	total_desc += have_meta ? 1 : 0;

	*nb_hw_desc = total_desc;
	return 0;
}

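/* ena_com_rx_pkt - harvest one received packet from the CQ.
 *
 * On return, ena_rx_ctx->descs is 0 when no complete packet is available;
 * otherwise it holds the number of entries filled into ena_rx_ctx->ena_bufs
 * (length and req_id per buffer), with the offload flags taken from the
 * packet's last completion descriptor. The paired RX SQ's next_to_comp is
 * advanced so the consumed buffers can be reposted.
 */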
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	for (i = 0; i < nb_hw_desc; i++) {
		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	}

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the packet's last cdesc */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

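/* ena_com_add_single_rx_desc - post one RX buffer to the submission queue.
 *
 * Every RX descriptor is both FIRST and LAST (one buffer per descriptor)
 * and is tagged with the caller's req_id so the buffer can be identified
 * when its completion arrives. Returns ENA_COM_NO_SPACE when the SQ is
 * full.
 *
 * Typical refill loop (illustrative only; buffer allocation and the paddr,
 * len, req_id values are driver specific and assumed here):
 *
 *	while (need--) {
 *		struct ena_com_buf ena_buf = { .paddr = paddr, .len = len };
 *
 *		if (ena_com_add_single_rx_desc(io_sq, &ena_buf, req_id))
 *			break;
 *	}
 *	ena_com_write_sq_doorbell(io_sq);
 */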
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	ena_com_sq_update_tail(io_sq);

	return 0;
}

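/* ena_com_tx_comp_req_id_get - poll the TX CQ for a single completion.
 *
 * Returns ENA_COM_TRY_AGAIN while the descriptor at the CQ head still
 * carries the stale phase bit (the device hasn't written it yet); otherwise
 * consumes the descriptor and reports the completed packet's req_id through
 * *req_id.
 */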
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected phase, it means that the device hasn't updated
	 * this completion entry yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	ena_com_cq_inc_head(io_cq);

	*req_id = READ_ONCE(cdesc->req_id);

	return 0;
}