xref: /freebsd/sys/dev/ice/ice_controlq.c (revision 681ce946f33e75c590e97c53076e86dff1fe8f4a)
/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2021, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_common.h"

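/* Record the control queue register offsets and masks in a queue-info
 * structure; the prefix selects the register set (PF_FW for the AdminQ,
 * PF_MBX for the PF-VF mailbox).
 */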
#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * Records the AdminQ register offsets in the control queue structure; this
 * must happen before the queue registers are configured.
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * Records the PF-VF mailbox register offsets in the control queue structure;
 * this must happen before the queue registers are configured.
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the send queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) ring
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;

	cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
				    sizeof(struct ice_sq_cd));
	if (!cq->sq.cmd_buf) {
		ice_free_dma_mem(hw, &cq->sq.desc_buf);
		return ICE_ERR_NO_MEMORY;
	}

	return ICE_SUCCESS;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) ring
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	return ICE_SUCCESS;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	ice_free_dma_mem(hw, &ring->desc_buf);
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
				     sizeof(cq->rq.desc_buf));
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design; there is
		 * no register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
		desc->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return ICE_SUCCESS;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
	cq->rq.r.rq_bi = NULL;
	ice_free(hw, cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
				     sizeof(cq->sq.desc_buf));
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
	}
	return ICE_SUCCESS;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	cq->sq.r.sq_bi = NULL;
	ice_free(hw, cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

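/**
 * ice_cfg_cq_regs - configure base and length registers for a control queue
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Clears the head and tail, programs the length/enable and base address
 * registers, then reads one register back to verify the config was applied.
 */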
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return ICE_SUCCESS;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return ICE_SUCCESS;
}

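/* Free the DMA buffers posted to a control queue ring (sq or rq), then the
 * command details list and the DMA head array for that ring.
 */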
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa)		\
				ice_free_dma_mem((hw),			\
					&(qi)->ring.r.ring##_bi[i]);	\
	}								\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		ice_free(hw, (qi)->ring.cmd_buf);			\
	/* free DMA head */						\
	ice_free(hw, (qi)->ring.dma_head);				\
} while (0)

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this, as the memory allocation
 * routines called are not atomic-context safe.
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this, as the memory allocation
 * routines called are not atomic-context safe.
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_idle_aq - stop ARQ/ATQ processing momentarily
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->rq.len, 0);

	ice_msec_delay(2, false);
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return ICE_SUCCESS;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return ICE_SUCCESS;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

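/*
 * Illustrative sketch (not part of the driver): the intended life cycle of
 * the control queues, per the notes above; the sizing values are whatever
 * the driver chooses:
 *
 *	hw->adminq.num_sq_entries = ...;	// also num_rq_entries and the
 *	hw->adminq.sq_buf_size = ...;		// buffer sizes, for adminq
 *	hw->mailboxq.num_sq_entries = ...;	// and mailboxq alike
 *	...
 *	ice_create_all_ctrlq(hw);	// once, at driver load
 *	...
 *	ice_shutdown_all_ctrlq(hw);	// e.g. on reset...
 *	ice_init_all_ctrlq(hw);		// ...then re-init, locks kept intact
 *	...
 *	ice_destroy_all_ctrlq(hw);	// once, at driver unload
 */
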
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors.
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 datalen, flags;

	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	datalen = LE16_TO_CPU(cq_desc->datalen);
	flags = LE16_TO_CPU(cq_desc->flags);

	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
		  LE16_TO_CPU(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->cookie_high),
		  LE32_TO_CPU(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.param0),
		  LE32_TO_CPU(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
	/* Dump buffer iff 1) one exists and 2) is either a response indicated
	 * by the DD and/or CMP flag set or a command with the RD flag set.
	 */
	if (buf && cq_desc->datalen != 0 &&
	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
	     flags & ICE_AQ_FLAG_RD)) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
				MIN_T(u16, buf_len, datalen));
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc. The caller is expected to hold the sq_lock.
 */
static enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		ice_memcpy(details->wb_desc, desc_on_ring,
			   sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	return status;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_SUCCESS;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	ice_acquire_lock(&cq->sq_lock);
	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
	ice_release_lock(&cq->sq_lock);

	return status;
}

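/*
 * Illustrative sketch (not part of the driver): issuing a direct (no-buffer)
 * admin command. The opcode ice_aqc_opc_example is hypothetical; real
 * callers pass an opcode defined in ice_adminq_cmd.h:
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_example);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *	if (status)
 *		... // hw->adminq.sq_last_status holds the AQ return code
 */
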
/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}

/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	enum ice_status ret_code = ICE_SUCCESS;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode), rq_last_status);
	}
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
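/*
 * Illustrative sketch (not part of the driver): draining the ARQ, e.g. after
 * an admin queue interrupt. The msg_buf allocation is the caller's
 * responsibility:
 *
 *	struct ice_rq_event_info e = { 0 };
 *	u16 pending = 0;
 *
 *	e.buf_len = cq->rq_buf_size;
 *	e.msg_buf = ...;	// caller-allocated, at least e.buf_len bytes
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &e, &pending))
 *			break;	// ICE_ERR_AQ_NO_WORK once the queue is empty
 *		... // process e.desc and the e.msg_len valid bytes of msg_buf
 *	} while (pending);
 */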
1240