/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2024, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)
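
/*
 * Illustrative expansion (editorial sketch, not compiled here):
 * ICE_CQ_INIT_REGS(cq, PF_FW) token-pastes the prefix onto each register
 * name, e.g.
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
 *	cq->rq.tail = PF_FW_ARQT;
 *
 * so the same macro body serves the admin (PF_FW), mailbox (PF_MBX), and
 * sideband (PF_SB) register sets used below.
 */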

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_sb_init_regs - Initialize Sideband registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_sb_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->sbq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_SB);
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the queue is enabled, else false.
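 *
 * For example (illustrative), an enabled queue of 256 entries reads back
 * (256 | len_ena_mask) from its LEN register, while a dead or freshly reset
 * queue reads back 0.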
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	ice_free_dma_mem(hw, &ring->desc_buf);
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
				     sizeof(cq->rq.desc_buf));
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the control queue design; there
		 * is no register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
		desc->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
	cq->rq.r.rq_bi = NULL;
	ice_free(hw, cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
				     sizeof(cq->sq.desc_buf));
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	cq->sq.r.sq_bi = NULL;
	ice_free(hw, cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

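/**
 * ice_cfg_cq_regs - configure base registers of a control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors the ring holds
 *
 * Clear head and tail, program the length register (with the enable bit
 * set) and the base address registers, then read back the base address low
 * register to verify the configuration was applied.
 */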
static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static int
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static int
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa)		\
				ice_free_dma_mem((hw),			\
					&(qi)->ring.r.ring##_bi[i]);	\
	}								\
	/* free DMA head */						\
	ice_free(hw, (qi)->ring.dma_head);				\
} while (0)
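
/*
 * Illustrative expansion (editorial sketch, not compiled here):
 * ICE_FREE_CQ_BUFS(hw, cq, sq) pastes the ring name into both the buffer
 * array and the entry count, so it walks cq->sq.r.sq_bi[0..num_sq_entries - 1],
 * frees each DMA buffer that has a non-zero physical address, and finally
 * frees cq->sq.dma_head.
 */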

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize receive side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the receive side of a control queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the transmit side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static int
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop processing of the control queue */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: true if the driver should attempt to load, false otherwise.
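 *
 * For example (illustrative), with an expected API version of 1.5 this
 * accepts minor versions 1.3 through 1.7 without logging, logs an
 * informational message for minors outside that window, and refuses to load
 * only when the reported major version is newer than expected (e.g. 2.x).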
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
	u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);

	if (hw->api_maj_ver > exp_fw_api_ver_major) {
		/* Major API version is newer than expected, don't load */
		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == exp_fw_api_ver_major) {
		if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
			ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 exp_fw_api_ver_major, exp_fw_api_ver_minor);
		else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
			ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 exp_fw_api_ver_major, exp_fw_api_ver_minor);
	} else {
		/* Major API version is older than expected, log a warning */
		ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
			 hw->api_maj_ver, hw->api_min_ver,
			 exp_fw_api_ver_major, exp_fw_api_ver_minor);
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static int
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_idle_aq - stop ARQ/ATQ processing momentarily
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->rq.len, 0);

	ice_msec_delay(2, false);
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static int ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	int ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_SB:
		ice_sb_init_regs(hw);
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_is_sbq_supported - is the sideband queue supported
 * @hw: pointer to the hardware structure
 *
 * Returns true if the sideband control queue interface is
 * supported for the device, false otherwise
 */
static bool ice_is_sbq_supported(struct ice_hw *hw)
{
	return ice_is_generic_mac(hw);
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void
ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
		   bool unloading)
{
	struct ice_ctl_q_info *cq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, unloading);
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
	/* Shutdown PHY Sideband */
	if (ice_is_sbq_supported(hw))
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
int ice_init_all_ctrlq(struct ice_hw *hw)
{
	u32 retry = 0;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
		ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* sideband control queue (SBQ) interface is not supported on some
	 * devices. Initialize if supported, else fall back to the admin queue
	 * interface
	 */
	if (ice_is_sbq_supported(hw)) {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
		if (status)
			return status;
	}
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
int ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_init_ctrlq_locks(&hw->sbq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw, true);

	ice_destroy_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_destroy_ctrlq_locks(&hw->sbq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans send side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors.
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_aq_desc *desc;
	u32 head;

	desc = ICE_CTL_Q_DESC(*sq, ntc);

	head = rd32(hw, sq->head);
	if (head >= sq->count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Read head value (%d) exceeds allowed range.\n",
			  head);
		return 0;
	}

	while (head != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n",
			  ntc, head);
		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);

		head = rd32(hw, sq->head);
		if (head >= sq->count) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Read head value (%d) exceeds allowed range.\n",
				  head);
			return 0;
		}
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_ctl_q_str - Convert control queue type to string
 * @qtype: the control queue type
 *
 * Returns: A string name for the given control queue type.
 */
static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
{
	switch (qtype) {
	case ICE_CTL_Q_UNKNOWN:
		return "Unknown CQ";
	case ICE_CTL_Q_ADMIN:
		return "AQ";
	case ICE_CTL_Q_MAILBOX:
		return "MBXQ";
	case ICE_CTL_Q_SB:
		return "SBQ";
	default:
		return "Unrecognized CQ";
	}
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 * @response: true if this is the writeback response
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void
ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
	     void *desc, void *buf, u16 buf_len, bool response)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 datalen, flags;

	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	datalen = LE16_TO_CPU(cq_desc->datalen);
	flags = LE16_TO_CPU(cq_desc->flags);

	ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
		  LE16_TO_CPU(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->cookie_high),
		  LE32_TO_CPU(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.param0),
		  LE32_TO_CPU(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
	/* Dump buffer iff 1) one exists and 2) is either a response indicated
	 * by the DD and/or CMP flag set or a command with the RD flag set.
	 */
	if (buf && cq_desc->datalen != 0 &&
	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
	     flags & ICE_AQ_FLAG_RD)) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
				MIN_T(u16, buf_len, datalen));
	}
}

/**
 * ice_sq_done - check if the last send on a control queue has completed
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns: true if all the descriptors on the send side of a control queue
 *          are finished processing, false otherwise.
 */
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* control queue designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd_nolock - send command to a control queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for a control queue. It prepares the
 * command into a descriptor, bumps the send queue tail, waits for the command
 * to complete, captures status and data for the command, etc.
 */
int
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u32 total_delay = 0;
	int status = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for the respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
	ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
	ice_flush(hw);

	/* Wait a short time before initial ice_sq_done() check, to allow
	 * hardware time for completion.
	 */
	ice_usec_delay(5, false);

	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(10, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
	ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);

	/* save writeback AQ if requested */
	if (cd && cd->wb_desc)
		ice_memcpy(cd->wb_desc, desc_on_ring,
			   sizeof(*cd->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if a timeout occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	return status;
}

/**
 * ice_sq_send_cmd - send command to a control queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Main command for the transmit side of a control queue. It puts the command
 * on the queue, bumps the tail, waits for processing of the command, captures
 * command status and results, etc.
 */
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	int status = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	ice_acquire_lock(&cq->sq_lock);
	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
	ice_release_lock(&cq->sq_lock);

	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}
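
/*
 * Typical caller flow (illustrative sketch; ice_aqc_opc_get_ver is an
 * assumed opcode name, not defined in this file). A direct command carries
 * no buffer, so buf is NULL and buf_size is 0:
 *
 *	struct ice_aq_desc desc;
 *	int status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 */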

/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * Clean one element from the receive side of a control queue. On return 'e'
 * contains contents of the message, and 'pending' contains the number of
 * events left to process.
 */
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	int ret_code = 0;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode), rq_last_status);
	}
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
	ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);

	/* Restore the original datalen and buffer address in the desc;
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
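
/*
 * Illustrative caller sketch (handle_event() is a hypothetical dispatch
 * hook, not part of this file). A service task typically drains the ARQ by
 * calling ice_clean_rq_elem() until it reports no more work:
 *
 *	struct ice_rq_event_info event = { 0 };
 *	u16 pending;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = ice_malloc(hw, event.buf_len);
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		handle_event(&event);
 *	} while (pending);
 */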
1332