/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2020, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)
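
/*
 * The prefix argument is token-pasted onto the register names, so
 * ICE_CQ_INIT_REGS(cq, PF_FW) expands the first assignment to
 *
 *	cq->sq.head = PF_FW_ATQH;
 *
 * and selects the remaining PF_FW_* AdminQ registers the same way, while
 * ICE_CQ_INIT_REGS(cq, PF_MBX) selects the PF_MBX_* mailbox registers
 * instead.
 */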

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}
82 
83 /**
84  * ice_check_sq_alive
85  * @hw: pointer to the HW struct
86  * @cq: pointer to the specific Control queue
87  *
88  * Returns true if Queue is enabled else false.
89  */
90 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
91 {
92 	/* check both queue-length and queue-enable fields */
93 	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
94 		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
95 						cq->sq.len_ena_mask)) ==
96 			(cq->num_sq_entries | cq->sq.len_ena_mask);
97 
98 	return false;
99 }

/**
 * ice_alloc_ctrlq_sq_ring - Allocate the Control Transmit Queue (ATQ) ring
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;

	cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
				    sizeof(struct ice_sq_cd));
	if (!cq->sq.cmd_buf) {
		ice_free_dma_mem(hw, &cq->sq.desc_buf);
		return ICE_ERR_NO_MEMORY;
	}

	return ICE_SUCCESS;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate the Control Receive Queue (ARQ) ring
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	return ICE_SUCCESS;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	ice_free_dma_mem(hw, &ring->desc_buf);
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
				     sizeof(cq->rq.desc_buf));
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design; there
		 * is no register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
		desc->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return ICE_SUCCESS;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
	ice_free(hw, cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
				     sizeof(cq->sq.desc_buf));
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
	}
	return ICE_SUCCESS;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	ice_free(hw, cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}
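
/**
 * ice_cfg_cq_regs - setup the Control queue registers
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of entries in the ring
 *
 * Program the base address and length registers (with the enable bit set)
 * for a control queue ring, then read one register back to verify that
 * the configuration was applied.
 */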
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return ICE_SUCCESS;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return ICE_SUCCESS;
}

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this, as the memory allocation
 * routines are not safe to call from atomic context
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this, as the memory allocation
 * routines are not safe to call from atomic context
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	int i;								\
	/* free the DMA buffers attached to the ring entries */	\
	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
		if ((qi)->ring.r.ring##_bi[i].pa)			\
			ice_free_dma_mem((hw),				\
					 &(qi)->ring.r.ring##_bi[i]);	\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		ice_free(hw, (qi)->ring.cmd_buf);			\
	/* free DMA head */						\
	ice_free(hw, (qi)->ring.dma_head);				\
} while (0)
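
/*
 * The ring argument is token-pasted into the field names, so for example
 * ICE_FREE_CQ_BUFS(hw, cq, sq) walks the cq->num_sq_entries entries of
 * cq->sq.r.sq_bi[], frees each DMA buffer that was actually allocated,
 * then frees the cq->sq.cmd_buf list and the cq->sq.dma_head array.
 */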

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load, 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}
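
/*
 * The minor version checks above tolerate a skew of two in either
 * direction before logging. As an illustration (hypothetical value, not
 * the real constant): with EXP_FW_API_VER_MINOR == 5, reported minor
 * versions 3 through 7 load silently, 8 and above log the "newer than
 * expected" message, and 2 and below log the "older than expected"
 * message.
 */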

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_idle_aq - stop ARQ/ATQ processing momentarily
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->rq.len, 0);

	ice_msec_delay(2, false);
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return ICE_SUCCESS;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return ICE_SUCCESS;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue */
	status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (status)
		return status;

	status = ice_init_check_adminq(hw);
	if (status)
		return status;

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shut down and later restart control queues,
 * such as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void
ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans the Admin Send Queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 datalen, flags;

	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	datalen = LE16_TO_CPU(cq_desc->datalen);
	flags = LE16_TO_CPU(cq_desc->flags);

	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
		  LE16_TO_CPU(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->cookie_high),
		  LE32_TO_CPU(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.param0),
		  LE32_TO_CPU(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
	/* Dump the buffer only if 1) one exists and 2) it is either a
	 * response (indicated by the DD and/or CMP flag set) or a command
	 * carrying data (RD flag set).
	 */
	if (buf && cq_desc->datalen != 0 &&
	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
	     flags & ICE_AQ_FLAG_RD)) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
				MIN_T(u16, buf_len, datalen));
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* The AQ designers suggest using the head register rather than
	 * the DD bit for better timing reliability
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It cleans the queue
 * of completed descriptors, posts the command, and polls for its
 * completion.
 */
static enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if a reset is in progress, return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

	/* Clean the queue to reclaim descriptors already processed by
	 * FW/MBX and check for room; ice_clean_sq() returns the number of
	 * free descriptors. With asynchronous completions, the clean step
	 * could instead run in a separate thread.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save the writeback AQ descriptor if requested */
	if (details->wb_desc)
		ice_memcpy(details->wb_desc, desc_on_ring,
			   sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if a timeout occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	return status;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the public send command routine for the ATQ. It acquires the
 * queue lock and dispatches the command via ice_sq_send_cmd_nolock().
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_SUCCESS;

	/* if a reset is in progress, return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	ice_acquire_lock(&cq->sq_lock);
	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
	ice_release_lock(&cq->sq_lock);

	return status;
}
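
/*
 * A minimal caller sketch (the opcode here is hypothetical, for
 * illustration only):
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, example_opcode);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *
 * A direct command passes NULL/0 for the buffer; an indirect command
 * instead passes a response buffer and its size, which the send routine
 * copies into the ring's DMA buffer and back out upon completion.
 */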

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}

/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = ICE_SUCCESS;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode),
			  cq->rq_last_status);
	}
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
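
/*
 * A minimal receive-side sketch (illustration only): callers typically
 * drain the ARQ by calling ice_clean_rq_elem() until it reports no more
 * work, e.g.
 *
 *	struct ice_rq_event_info event = { .buf_len = cq->rq_buf_size,
 *					   .msg_buf = buf };
 *	u16 pending;
 *
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		(process event.desc and event.msg_buf / event.msg_len)
 *	} while (pending);
 *
 * where buf is a caller-owned buffer of at least cq->rq_buf_size bytes;
 * ICE_ERR_AQ_NO_WORK terminates the loop once the queue is empty.
 */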
1228