// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)

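/* For example, ICE_CQ_INIT_REGS(cq, PF_FW) pastes the PF_FW prefix onto each
 * register name: cq->sq.head becomes PF_FW_ATQH, cq->rq.len_mask becomes
 * PF_FW_ARQLEN_ARQLEN_M, and so on for the whole AdminQ register set.
 */
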
/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * Sets up the fixed AdminQ register offsets. ice_init_ctrlq() calls this
 * before the rings are allocated.
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * Sets up the fixed Mailbox register offsets. ice_init_ctrlq() calls this
 * before the rings are allocated.
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_sb_init_regs - Initialize Sideband registers
 * @hw: pointer to the hardware structure
 *
 * Sets up the fixed Sideband register offsets. ice_init_ctrlq() calls this
 * before the rings are allocated.
 */
static void ice_sb_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->sbq;

	ICE_CQ_INIT_REGS(cq, PF_SB);
}

/**
 * ice_check_sq_alive - check if the send queue is alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the send queue is enabled, false otherwise.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

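/* For the AdminQ, for instance, this reads PF_FW_ATQLEN and reports the queue
 * alive only when the length field still equals num_sq_entries and the enable
 * bit is still set, i.e. the register still holds what ice_cfg_cq_regs()
 * programmed into it.
 */
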
/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) ring
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return -ENOMEM;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) ring
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return -ENOMEM;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return -ENOMEM;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* Per the Admin Queue design there is no register for buffer
		 * size configuration, so the size is carried in the
		 * descriptor's datalen field instead.
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return -ENOMEM;
}

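/* Note the pattern here: unlike the send queue, every ARQ descriptor is
 * pre-posted with a buffer at init time; ice_cfg_rq_regs() publishes them to
 * firmware by writing the tail, and ice_clean_rq_elem() re-posts each buffer
 * once its event has been consumed.
 */
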
/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return -ENOMEM;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return -ENOMEM;
}

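/**
 * ice_cfg_cq_regs - program the registers for one control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Clears head and tail, programs the base address and length registers, and
 * reads one register back to verify that the configuration took effect.
 */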
273 ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
274 {
275 	/* Clear Head and Tail */
276 	wr32(hw, ring->head, 0);
277 	wr32(hw, ring->tail, 0);
278 
279 	/* set starting point */
280 	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
281 	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
282 	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
283 
284 	/* Check one register to verify that config was applied */
285 	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
286 		return -EIO;
287 
288 	return 0;
289 }
290 
/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive queue
 * (event queue)
 */
static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free the DMA buffers */					\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa) {		\
				dmam_free_coherent(ice_hw_to_dev(hw),	\
					(qi)->ring.r.ring##_bi[i].size,	\
					(qi)->ring.r.ring##_bi[i].va,	\
					(qi)->ring.r.ring##_bi[i].pa);	\
				(qi)->ring.r.ring##_bi[i].va = NULL;	\
				(qi)->ring.r.ring##_bi[i].pa = 0;	\
				(qi)->ring.r.ring##_bi[i].size = 0;	\
			}						\
	}								\
	/* free the buffer info list */					\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);		\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

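/* For example, ICE_FREE_CQ_BUFS(hw, cq, sq) expands ring##_bi to sq_bi: it
 * walks cq->sq.r.sq_bi[0..cq->num_sq_entries) releasing each DMA buffer,
 * then frees cq->sq.cmd_buf and cq->sq.dma_head.
 */
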
/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not atomic-context safe.
 */
static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not atomic-context safe.
 */
static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = -EBUSY;
		goto shutdown_sq_out;
	}

	/* Stop processing of the control send queue */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: true if the driver should attempt to load, false otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

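/* Illustrative example (the real EXP_FW_API_VER_* values are defined in
 * ice_controlq.h): with an expected API version of 1.5, minors 1.3-1.7 load
 * silently, 1.8+ logs "newer than expected", 1.0-1.2 logs "older", any 2.x
 * refuses to load, and any 0.x loads with the "older" message.
 */
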
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = -EBUSY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to verify it is alive
 * @hw: pointer to the hardware structure
 */
static int ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	int status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = -EIO;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	int ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_SB:
		ice_sb_init_regs(hw);
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return -EINVAL;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return -EIO;
	}

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_is_sbq_supported - is the sideband queue supported
 * @hw: pointer to the hardware structure
 *
 * Returns true if the sideband control queue interface is
 * supported for the device, false otherwise
 */
bool ice_is_sbq_supported(struct ice_hw *hw)
{
	/* The device sideband queue is only supported on devices with the
	 * generic MAC type.
	 */
	return hw->mac_type == ICE_MAC_GENERIC;
}

/**
 * ice_get_sbq - returns the right control queue to use for sideband
 * @hw: pointer to the hardware structure
 */
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
{
	if (ice_is_sbq_supported(hw))
		return &hw->sbq;
	return &hw->adminq;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PHY Sideband */
	if (ice_is_sbq_supported(hw))
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
int ice_init_all_ctrlq(struct ice_hw *hw)
{
	u32 retry = 0;
	int status;

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != -EIO)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* The sideband control queue (SBQ) interface is not supported on some
	 * devices. Initialize it if supported, otherwise fall back to the
	 * admin queue interface.
	 */
	if (ice_is_sbq_supported(hw)) {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
		if (status)
			return status;
	}
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
int ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_init_ctrlq_locks(&hw->sbq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

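/* Usage sketch (illustrative only; in the in-tree driver the queue sizing is
 * done in ice_main.c at probe time). The entry counts and buffer sizes below
 * are made-up example values, not the driver's real ones:
 *
 *	hw->adminq.num_sq_entries = 32;
 *	hw->adminq.num_rq_entries = 32;
 *	hw->adminq.sq_buf_size = 4096;
 *	hw->adminq.rq_buf_size = 4096;
 *	// ... same four fields for hw->mailboxq (and hw->sbq if supported)
 *
 *	err = ice_create_all_ctrlq(hw);
 *	if (err)
 *		goto err_out;
 */
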
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_destroy_ctrlq_locks(&hw->sbq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - clean the Admin Send Queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors.
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq - dump control queue descriptor and buffer for debugging
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = desc;
	u16 len;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	struct ice_sq_cd *details;
	unsigned long timeout;
	int status = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return -EBUSY;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = -EIO;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = -EINVAL;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = -EINVAL;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = -EIO;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = -ENOSPC;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
	ice_flush(hw);

	/* Wait a short time before the initial ice_sq_done() check, to allow
	 * hardware time for completion.
	 */
	udelay(5);

	timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
	do {
		if (ice_sq_done(hw, cq))
			break;

		usleep_range(100, 150);
	} while (time_before(jiffies, timeout));

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > buf len %d\n",
					  copy_size, buf_size);
				status = -EIO;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = -EIO;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if a timeout occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = -EIO;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = -EIO;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}

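/* Usage sketch (illustrative): issuing a direct (no-buffer) command on the
 * AdminQ. The opcode below is only an example of the ice_aqc_opc_* values
 * defined in ice_adminq_cmd.h:
 *
 *	struct ice_aq_desc desc;
 *	int err;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	err = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 */
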
/**
 * ice_clean_rq_elem - clean one element from the Control ARQ
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	int ret_code = 0;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = -EIO;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = -EALREADY;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = -EIO;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode), rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
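
/* Usage sketch (illustrative): draining the receive queue in the style of the
 * driver's service-task handler. The buffer sizing and dispatch comment are
 * examples, not the driver's exact code:
 *
 *	struct ice_rq_event_info event = {};
 *	u16 pending = 0;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return;
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		// dispatch on le16_to_cpu(event.desc.opcode) here
 *	} while (pending);
 *	kfree(event.msg_buf);
 */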