xref: /linux/drivers/net/ethernet/intel/ice/ice_controlq.c (revision 3b5584afeef05319ade0fbf5f634a64fd3e5772b)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_common.h"
5 
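/* Fill in a control queue's send (sq/ATQ) and receive (rq/ARQ) register
 * offsets and masks from a hardware register prefix, e.g. PF_FW for the
 * AdminQ or PF_MBX for the PF/VF Mailbox queue.
 */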
6 #define ICE_CQ_INIT_REGS(qinfo, prefix)				\
7 do {								\
8 	(qinfo)->sq.head = prefix##_ATQH;			\
9 	(qinfo)->sq.tail = prefix##_ATQT;			\
10 	(qinfo)->sq.len = prefix##_ATQLEN;			\
11 	(qinfo)->sq.bah = prefix##_ATQBAH;			\
12 	(qinfo)->sq.bal = prefix##_ATQBAL;			\
13 	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
14 	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
15 	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
16 	(qinfo)->rq.head = prefix##_ARQH;			\
17 	(qinfo)->rq.tail = prefix##_ARQT;			\
18 	(qinfo)->rq.len = prefix##_ARQLEN;			\
19 	(qinfo)->rq.bah = prefix##_ARQBAH;			\
20 	(qinfo)->rq.bal = prefix##_ARQBAL;			\
21 	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
22 	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
23 	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
24 } while (0)
25 
26 /**
27  * ice_adminq_init_regs - Initialize AdminQ registers
28  * @hw: pointer to the hardware structure
29  *
30  * Called from ice_init_ctrlq() before the send and receive queues are allocated
31  */
32 static void ice_adminq_init_regs(struct ice_hw *hw)
33 {
34 	struct ice_ctl_q_info *cq = &hw->adminq;
35 
36 	ICE_CQ_INIT_REGS(cq, PF_FW);
37 }
38 
39 /**
40  * ice_mailbox_init_regs - Initialize Mailbox registers
41  * @hw: pointer to the hardware structure
42  *
43  * Called from ice_init_ctrlq() before the send and receive queues are allocated
44  */
45 static void ice_mailbox_init_regs(struct ice_hw *hw)
46 {
47 	struct ice_ctl_q_info *cq = &hw->mailboxq;
48 
49 	ICE_CQ_INIT_REGS(cq, PF_MBX);
50 }
51 
52 /**
53  * ice_check_sq_alive - check if the Send Queue (ATQ) is alive
54  * @hw: pointer to the HW struct
55  * @cq: pointer to the specific Control queue
56  *
57  * Returns true if the queue is enabled, else false.
58  */
59 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
60 {
61 	/* check both queue-length and queue-enable fields */
62 	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
63 		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
64 						cq->sq.len_ena_mask)) ==
65 			(cq->num_sq_entries | cq->sq.len_ena_mask);
66 
67 	return false;
68 }
69 
70 /**
71  * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) ring
72  * @hw: pointer to the hardware structure
73  * @cq: pointer to the specific Control queue
74  */
75 static enum ice_status
76 ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
77 {
78 	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
79 
80 	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
81 						 &cq->sq.desc_buf.pa,
82 						 GFP_KERNEL | __GFP_ZERO);
83 	if (!cq->sq.desc_buf.va)
84 		return ICE_ERR_NO_MEMORY;
85 	cq->sq.desc_buf.size = size;
86 
87 	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
88 				      sizeof(struct ice_sq_cd), GFP_KERNEL);
89 	if (!cq->sq.cmd_buf) {
90 		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
91 				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
92 		cq->sq.desc_buf.va = NULL;
93 		cq->sq.desc_buf.pa = 0;
94 		cq->sq.desc_buf.size = 0;
95 		return ICE_ERR_NO_MEMORY;
96 	}
97 
98 	return 0;
99 }
100 
101 /**
102  * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) ring
103  * @hw: pointer to the hardware structure
104  * @cq: pointer to the specific Control queue
105  */
106 static enum ice_status
107 ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
108 {
109 	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
110 
111 	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
112 						 &cq->rq.desc_buf.pa,
113 						 GFP_KERNEL | __GFP_ZERO);
114 	if (!cq->rq.desc_buf.va)
115 		return ICE_ERR_NO_MEMORY;
116 	cq->rq.desc_buf.size = size;
117 	return 0;
118 }
119 
120 /**
121  * ice_free_cq_ring - Free control queue ring
122  * @hw: pointer to the hardware structure
123  * @ring: pointer to the specific control queue ring
124  *
125  * This assumes the posted buffers have already been cleaned
126  * and de-allocated
127  */
128 static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
129 {
130 	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
131 			   ring->desc_buf.va, ring->desc_buf.pa);
132 	ring->desc_buf.va = NULL;
133 	ring->desc_buf.pa = 0;
134 	ring->desc_buf.size = 0;
135 }
136 
137 /**
138  * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
139  * @hw: pointer to the hardware structure
140  * @cq: pointer to the specific Control queue
141  */
142 static enum ice_status
143 ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
144 {
145 	int i;
146 
147 	/* We'll be allocating the buffer info memory first, then we can
148 	 * allocate the mapped buffers for the event processing
149 	 */
150 	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
151 				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
152 	if (!cq->rq.dma_head)
153 		return ICE_ERR_NO_MEMORY;
154 	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
155 
156 	/* allocate the mapped buffers */
157 	for (i = 0; i < cq->num_rq_entries; i++) {
158 		struct ice_aq_desc *desc;
159 		struct ice_dma_mem *bi;
160 
161 		bi = &cq->rq.r.rq_bi[i];
162 		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
163 					     cq->rq_buf_size, &bi->pa,
164 					     GFP_KERNEL | __GFP_ZERO);
165 		if (!bi->va)
166 			goto unwind_alloc_rq_bufs;
167 		bi->size = cq->rq_buf_size;
168 
169 		/* now configure the descriptors for use */
170 		desc = ICE_CTL_Q_DESC(cq->rq, i);
171 
172 		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
173 		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
174 			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
175 		desc->opcode = 0;
176 		/* This is in accordance with Admin queue design; there is no
177 		 * register for buffer size configuration
178 		 */
179 		desc->datalen = cpu_to_le16(bi->size);
180 		desc->retval = 0;
181 		desc->cookie_high = 0;
182 		desc->cookie_low = 0;
183 		desc->params.generic.addr_high =
184 			cpu_to_le32(upper_32_bits(bi->pa));
185 		desc->params.generic.addr_low =
186 			cpu_to_le32(lower_32_bits(bi->pa));
187 		desc->params.generic.param0 = 0;
188 		desc->params.generic.param1 = 0;
189 	}
190 	return 0;
191 
192 unwind_alloc_rq_bufs:
193 	/* don't try to free the one that failed... */
194 	i--;
195 	for (; i >= 0; i--) {
196 		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
197 				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
198 		cq->rq.r.rq_bi[i].va = NULL;
199 		cq->rq.r.rq_bi[i].pa = 0;
200 		cq->rq.r.rq_bi[i].size = 0;
201 	}
202 	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
203 
204 	return ICE_ERR_NO_MEMORY;
205 }
206 
207 /**
208  * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
209  * @hw: pointer to the hardware structure
210  * @cq: pointer to the specific Control queue
211  */
212 static enum ice_status
213 ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
214 {
215 	int i;
216 
217 	/* No mapped memory needed yet, just the buffer info structures */
218 	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
219 				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
220 	if (!cq->sq.dma_head)
221 		return ICE_ERR_NO_MEMORY;
222 	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
223 
224 	/* allocate the mapped buffers */
225 	for (i = 0; i < cq->num_sq_entries; i++) {
226 		struct ice_dma_mem *bi;
227 
228 		bi = &cq->sq.r.sq_bi[i];
229 		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
230 					     cq->sq_buf_size, &bi->pa,
231 					     GFP_KERNEL | __GFP_ZERO);
232 		if (!bi->va)
233 			goto unwind_alloc_sq_bufs;
234 		bi->size = cq->sq_buf_size;
235 	}
236 	return 0;
237 
238 unwind_alloc_sq_bufs:
239 	/* don't try to free the one that failed... */
240 	i--;
241 	for (; i >= 0; i--) {
242 		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
243 				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
244 		cq->sq.r.sq_bi[i].va = NULL;
245 		cq->sq.r.sq_bi[i].pa = 0;
246 		cq->sq.r.sq_bi[i].size = 0;
247 	}
248 	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
249 
250 	return ICE_ERR_NO_MEMORY;
251 }
252 
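/**
 * ice_cfg_cq_regs - setup registers for a control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Clears the head and tail registers, programs the base address and length
 * registers, and reads back the base address to verify the write took effect.
 */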
253 static enum ice_status
254 ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
255 {
256 	/* Clear Head and Tail */
257 	wr32(hw, ring->head, 0);
258 	wr32(hw, ring->tail, 0);
259 
260 	/* set starting point */
261 	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
262 	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
263 	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
264 
265 	/* Check one register to verify that config was applied */
266 	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
267 		return ICE_ERR_AQ_ERROR;
268 
269 	return 0;
270 }
271 
272 /**
273  * ice_cfg_sq_regs - configure Control ATQ registers
274  * @hw: pointer to the hardware structure
275  * @cq: pointer to the specific Control queue
276  *
277  * Configure base address and length registers for the transmit queue
278  */
279 static enum ice_status
280 ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
281 {
282 	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
283 }
284 
285 /**
286  * ice_cfg_rq_regs - configure Control ARQ registers
287  * @hw: pointer to the hardware structure
288  * @cq: pointer to the specific Control queue
289  *
290  * Configure base address and length registers for the receive (event) queue
291  */
292 static enum ice_status
293 ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
294 {
295 	enum ice_status status;
296 
297 	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
298 	if (status)
299 		return status;
300 
301 	/* Update tail in the HW to post pre-allocated buffers */
302 	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
303 
304 	return 0;
305 }
306 
307 /**
308  * ice_init_sq - main initialization routine for Control ATQ
309  * @hw: pointer to the hardware structure
310  * @cq: pointer to the specific Control queue
311  *
312  * This is the main initialization routine for the Control Send Queue.
313  * Prior to calling this function, the driver *MUST* set the following fields
314  * in the cq structure:
315  *     - cq->num_sq_entries
316  *     - cq->sq_buf_size
317  *
318  * Do *NOT* hold the lock when calling this as the memory allocation routines
319  * called are not going to be atomic context safe
320  */
321 static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
322 {
323 	enum ice_status ret_code;
324 
325 	if (cq->sq.count > 0) {
326 		/* queue already initialized */
327 		ret_code = ICE_ERR_NOT_READY;
328 		goto init_ctrlq_exit;
329 	}
330 
331 	/* verify input for valid configuration */
332 	if (!cq->num_sq_entries || !cq->sq_buf_size) {
333 		ret_code = ICE_ERR_CFG;
334 		goto init_ctrlq_exit;
335 	}
336 
337 	cq->sq.next_to_use = 0;
338 	cq->sq.next_to_clean = 0;
339 
340 	/* allocate the ring memory */
341 	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
342 	if (ret_code)
343 		goto init_ctrlq_exit;
344 
345 	/* allocate buffers in the rings */
346 	ret_code = ice_alloc_sq_bufs(hw, cq);
347 	if (ret_code)
348 		goto init_ctrlq_free_rings;
349 
350 	/* initialize base registers */
351 	ret_code = ice_cfg_sq_regs(hw, cq);
352 	if (ret_code)
353 		goto init_ctrlq_free_rings;
354 
355 	/* success! */
356 	cq->sq.count = cq->num_sq_entries;
357 	goto init_ctrlq_exit;
358 
359 init_ctrlq_free_rings:
360 	ice_free_cq_ring(hw, &cq->sq);
361 
362 init_ctrlq_exit:
363 	return ret_code;
364 }
365 
366 /**
367  * ice_init_rq - initialize ARQ
368  * @hw: pointer to the hardware structure
369  * @cq: pointer to the specific Control queue
370  *
371  * The main initialization routine for the Admin Receive (Event) Queue.
372  * Prior to calling this function, the driver *MUST* set the following fields
373  * in the cq structure:
374  *     - cq->num_rq_entries
375  *     - cq->rq_buf_size
376  *
377  * Do *NOT* hold the lock when calling this as the memory allocation routines
378  * called are not going to be atomic context safe
379  */
380 static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
381 {
382 	enum ice_status ret_code;
383 
384 	if (cq->rq.count > 0) {
385 		/* queue already initialized */
386 		ret_code = ICE_ERR_NOT_READY;
387 		goto init_ctrlq_exit;
388 	}
389 
390 	/* verify input for valid configuration */
391 	if (!cq->num_rq_entries || !cq->rq_buf_size) {
392 		ret_code = ICE_ERR_CFG;
393 		goto init_ctrlq_exit;
394 	}
395 
396 	cq->rq.next_to_use = 0;
397 	cq->rq.next_to_clean = 0;
398 
399 	/* allocate the ring memory */
400 	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
401 	if (ret_code)
402 		goto init_ctrlq_exit;
403 
404 	/* allocate buffers in the rings */
405 	ret_code = ice_alloc_rq_bufs(hw, cq);
406 	if (ret_code)
407 		goto init_ctrlq_free_rings;
408 
409 	/* initialize base registers */
410 	ret_code = ice_cfg_rq_regs(hw, cq);
411 	if (ret_code)
412 		goto init_ctrlq_free_rings;
413 
414 	/* success! */
415 	cq->rq.count = cq->num_rq_entries;
416 	goto init_ctrlq_exit;
417 
418 init_ctrlq_free_rings:
419 	ice_free_cq_ring(hw, &cq->rq);
420 
421 init_ctrlq_exit:
422 	return ret_code;
423 }
424 
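/* Free the DMA buffers posted to a control queue ring ("ring" is the sq or rq
 * member name), then free the command details array and the DMA bookkeeping
 * head that tracked them.
 */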
425 #define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
426 do {									\
427 	int i;								\
428 	/* free descriptors */						\
429 	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
430 		if ((qi)->ring.r.ring##_bi[i].pa) {			\
431 			dmam_free_coherent(ice_hw_to_dev(hw),		\
432 					   (qi)->ring.r.ring##_bi[i].size,\
433 					   (qi)->ring.r.ring##_bi[i].va,\
434 					   (qi)->ring.r.ring##_bi[i].pa);\
435 			(qi)->ring.r.ring##_bi[i].va = NULL;		\
436 			(qi)->ring.r.ring##_bi[i].pa = 0;		\
437 			(qi)->ring.r.ring##_bi[i].size = 0;		\
438 		}							\
439 	/* free the buffer info list */					\
440 	if ((qi)->ring.cmd_buf)						\
441 		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
442 	/* free DMA head */						\
443 	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
444 } while (0)
445 
446 /**
447  * ice_shutdown_sq - shutdown the Control ATQ
448  * @hw: pointer to the hardware structure
449  * @cq: pointer to the specific Control queue
450  *
451  * The main shutdown routine for the Control Transmit Queue
452  */
453 static enum ice_status
454 ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
455 {
456 	enum ice_status ret_code = 0;
457 
458 	mutex_lock(&cq->sq_lock);
459 
460 	if (!cq->sq.count) {
461 		ret_code = ICE_ERR_NOT_READY;
462 		goto shutdown_sq_out;
463 	}
464 
465 	/* Stop Control Send Queue processing */
466 	wr32(hw, cq->sq.head, 0);
467 	wr32(hw, cq->sq.tail, 0);
468 	wr32(hw, cq->sq.len, 0);
469 	wr32(hw, cq->sq.bal, 0);
470 	wr32(hw, cq->sq.bah, 0);
471 
472 	cq->sq.count = 0;	/* to indicate uninitialized queue */
473 
474 	/* free ring buffers and the ring itself */
475 	ICE_FREE_CQ_BUFS(hw, cq, sq);
476 	ice_free_cq_ring(hw, &cq->sq);
477 
478 shutdown_sq_out:
479 	mutex_unlock(&cq->sq_lock);
480 	return ret_code;
481 }
482 
483 /**
484  * ice_aq_ver_check - Check the reported AQ API version.
485  * @hw: pointer to the hardware structure
486  *
487  * Checks if the driver should load on a given AQ API version.
488  *
489  * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
490  */
491 static bool ice_aq_ver_check(struct ice_hw *hw)
492 {
493 	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
494 		/* Major API version is newer than expected, don't load */
495 		dev_warn(ice_hw_to_dev(hw),
496 			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
497 		return false;
498 	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
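		/* Same major version: tolerate a minor version skew of up to
		 * two in either direction before suggesting a driver or NVM
		 * update.
		 */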
499 		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
500 			dev_info(ice_hw_to_dev(hw),
501 				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
502 		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
503 			dev_info(ice_hw_to_dev(hw),
504 				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
505 	} else {
506 		/* Major API version is older than expected, log a warning */
507 		dev_info(ice_hw_to_dev(hw),
508 			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
509 	}
510 	return true;
511 }
512 
513 /**
514  * ice_shutdown_rq - shutdown Control ARQ
515  * @hw: pointer to the hardware structure
516  * @cq: pointer to the specific Control queue
517  *
518  * The main shutdown routine for the Control Receive Queue
519  */
520 static enum ice_status
521 ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
522 {
523 	enum ice_status ret_code = 0;
524 
525 	mutex_lock(&cq->rq_lock);
526 
527 	if (!cq->rq.count) {
528 		ret_code = ICE_ERR_NOT_READY;
529 		goto shutdown_rq_out;
530 	}
531 
532 	/* Stop Control Queue processing */
533 	wr32(hw, cq->rq.head, 0);
534 	wr32(hw, cq->rq.tail, 0);
535 	wr32(hw, cq->rq.len, 0);
536 	wr32(hw, cq->rq.bal, 0);
537 	wr32(hw, cq->rq.bah, 0);
538 
539 	/* set rq.count to 0 to indicate uninitialized queue */
540 	cq->rq.count = 0;
541 
542 	/* free ring buffers and the ring itself */
543 	ICE_FREE_CQ_BUFS(hw, cq, rq);
544 	ice_free_cq_ring(hw, &cq->rq);
545 
546 shutdown_rq_out:
547 	mutex_unlock(&cq->rq_lock);
548 	return ret_code;
549 }
550 
551 /**
552  * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
553  * @hw: pointer to the hardware structure
554  */
555 static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
556 {
557 	struct ice_ctl_q_info *cq = &hw->adminq;
558 	enum ice_status status;
559 
560 	status = ice_aq_get_fw_ver(hw, NULL);
561 	if (status)
562 		goto init_ctrlq_free_rq;
563 
564 	if (!ice_aq_ver_check(hw)) {
565 		status = ICE_ERR_FW_API_VER;
566 		goto init_ctrlq_free_rq;
567 	}
568 
569 	return 0;
570 
571 init_ctrlq_free_rq:
572 	ice_shutdown_rq(hw, cq);
573 	ice_shutdown_sq(hw, cq);
574 	return status;
575 }
576 
577 /**
578  * ice_init_ctrlq - main initialization routine for any control Queue
579  * @hw: pointer to the hardware structure
580  * @q_type: specific Control queue type
581  *
582  * Prior to calling this function, the driver *MUST* set the following fields
583  * in the cq structure:
584  *     - cq->num_sq_entries
585  *     - cq->num_rq_entries
586  *     - cq->rq_buf_size
587  *     - cq->sq_buf_size
588  *
589  * NOTE: this function does not initialize the controlq locks
590  */
591 static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
592 {
593 	struct ice_ctl_q_info *cq;
594 	enum ice_status ret_code;
595 
596 	switch (q_type) {
597 	case ICE_CTL_Q_ADMIN:
598 		ice_adminq_init_regs(hw);
599 		cq = &hw->adminq;
600 		break;
601 	case ICE_CTL_Q_MAILBOX:
602 		ice_mailbox_init_regs(hw);
603 		cq = &hw->mailboxq;
604 		break;
605 	default:
606 		return ICE_ERR_PARAM;
607 	}
608 	cq->qtype = q_type;
609 
610 	/* verify input for valid configuration */
611 	if (!cq->num_rq_entries || !cq->num_sq_entries ||
612 	    !cq->rq_buf_size || !cq->sq_buf_size) {
613 		return ICE_ERR_CFG;
614 	}
615 
616 	/* setup SQ command write back timeout */
617 	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
618 
619 	/* allocate the ATQ */
620 	ret_code = ice_init_sq(hw, cq);
621 	if (ret_code)
622 		return ret_code;
623 
624 	/* allocate the ARQ */
625 	ret_code = ice_init_rq(hw, cq);
626 	if (ret_code)
627 		goto init_ctrlq_free_sq;
628 
629 	/* success! */
630 	return 0;
631 
632 init_ctrlq_free_sq:
633 	ice_shutdown_sq(hw, cq);
634 	return ret_code;
635 }
636 
637 /**
638  * ice_init_all_ctrlq - main initialization routine for all control queues
639  * @hw: pointer to the hardware structure
640  *
641  * Prior to calling this function, the driver *MUST* set the following fields
642  * in the cq structure for all control queues:
643  *     - cq->num_sq_entries
644  *     - cq->num_rq_entries
645  *     - cq->rq_buf_size
646  *     - cq->sq_buf_size
647  *
648  * NOTE: this function does not initialize the controlq locks.
649  */
650 enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
651 {
652 	enum ice_status ret_code;
653 
654 	/* Init FW admin queue */
655 	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
656 	if (ret_code)
657 		return ret_code;
658 
659 	ret_code = ice_init_check_adminq(hw);
660 	if (ret_code)
661 		return ret_code;
662 
663 	/* Init Mailbox queue */
664 	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
665 }
666 
667 /**
668  * ice_init_ctrlq_locks - Initialize locks for a control queue
669  * @cq: pointer to the control queue
670  *
671  * Initializes the send and receive queue locks for a given control queue.
672  */
673 static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
674 {
675 	mutex_init(&cq->sq_lock);
676 	mutex_init(&cq->rq_lock);
677 }
678 
679 /**
680  * ice_create_all_ctrlq - main initialization routine for all control queues
681  * @hw: pointer to the hardware structure
682  *
683  * Prior to calling this function, the driver *MUST* set the following fields
684  * in the cq structure for all control queues:
685  *     - cq->num_sq_entries
686  *     - cq->num_rq_entries
687  *     - cq->rq_buf_size
688  *     - cq->sq_buf_size
689  *
690  * This function creates all the control queue locks and then calls
691  * ice_init_all_ctrlq. It should be called once during driver load. If the
692  * driver needs to re-initialize control queues at run time it should call
693  * ice_init_all_ctrlq instead.
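 *
 * A minimal usage sketch (ICE_AQ_LEN and ICE_AQ_MAX_BUF_LEN are assumed
 * stand-ins for the driver's configured queue lengths and buffer sizes,
 * not values defined in this file):
 *
 *	hw->adminq.num_sq_entries = ICE_AQ_LEN;
 *	hw->adminq.num_rq_entries = ICE_AQ_LEN;
 *	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
 *	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
 *	(and likewise for hw->mailboxq)
 *
 *	if (ice_create_all_ctrlq(hw))
 *		goto err_init_ctrlq;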
694  */
695 enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
696 {
697 	ice_init_ctrlq_locks(&hw->adminq);
698 	ice_init_ctrlq_locks(&hw->mailboxq);
699 
700 	return ice_init_all_ctrlq(hw);
701 }
702 
703 /**
704  * ice_shutdown_ctrlq - shutdown routine for any control queue
705  * @hw: pointer to the hardware structure
706  * @q_type: specific Control queue type
707  *
708  * NOTE: this function does not destroy the control queue locks.
709  */
710 static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
711 {
712 	struct ice_ctl_q_info *cq;
713 
714 	switch (q_type) {
715 	case ICE_CTL_Q_ADMIN:
716 		cq = &hw->adminq;
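		/* Notify firmware that the AdminQ is going down, but only if
		 * the send queue is still alive to accept the command.
		 */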
717 		if (ice_check_sq_alive(hw, cq))
718 			ice_aq_q_shutdown(hw, true);
719 		break;
720 	case ICE_CTL_Q_MAILBOX:
721 		cq = &hw->mailboxq;
722 		break;
723 	default:
724 		return;
725 	}
726 
727 	ice_shutdown_sq(hw, cq);
728 	ice_shutdown_rq(hw, cq);
729 }
730 
731 /**
732  * ice_shutdown_all_ctrlq - shutdown routine for all control queues
733  * @hw: pointer to the hardware structure
734  *
735  * NOTE: this function does not destroy the control queue locks. The driver
736  * may call this at runtime to shutdown and later restart control queues, such
737  * as in response to a reset event.
738  */
739 void ice_shutdown_all_ctrlq(struct ice_hw *hw)
740 {
741 	/* Shutdown FW admin queue */
742 	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
743 	/* Shutdown PF-VF Mailbox */
744 	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
745 }
746 
747 /**
748  * ice_destroy_ctrlq_locks - Destroy locks for a control queue
749  * @cq: pointer to the control queue
750  *
751  * Destroys the send and receive queue locks for a given control queue.
752  */
753 static void
754 ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
755 {
756 	mutex_destroy(&cq->sq_lock);
757 	mutex_destroy(&cq->rq_lock);
758 }
759 
760 /**
761  * ice_destroy_all_ctrlq - exit routine for all control queues
762  * @hw: pointer to the hardware structure
763  *
764  * This function shuts down all the control queues and then destroys the
765  * control queue locks. It should be called once during driver unload. The
766  * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
767  * reinitialize control queues, such as in response to a reset event.
768  */
769 void ice_destroy_all_ctrlq(struct ice_hw *hw)
770 {
771 	/* shut down all the control queues first */
772 	ice_shutdown_all_ctrlq(hw);
773 
774 	ice_destroy_ctrlq_locks(&hw->adminq);
775 	ice_destroy_ctrlq_locks(&hw->mailboxq);
776 }
777 
778 /**
779  * ice_clean_sq - cleans Admin send queue (ATQ)
780  * @hw: pointer to the hardware structure
781  * @cq: pointer to the specific Control queue
782  *
783  * Cleans descriptors already processed by firmware and returns the number of free descriptors
784  */
785 static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
786 {
787 	struct ice_ctl_q_ring *sq = &cq->sq;
788 	u16 ntc = sq->next_to_clean;
789 	struct ice_sq_cd *details;
790 	struct ice_aq_desc *desc;
791 
792 	desc = ICE_CTL_Q_DESC(*sq, ntc);
793 	details = ICE_CTL_Q_DETAILS(*sq, ntc);
794 
795 	while (rd32(hw, cq->sq.head) != ntc) {
796 		ice_debug(hw, ICE_DBG_AQ_MSG,
797 			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
798 		memset(desc, 0, sizeof(*desc));
799 		memset(details, 0, sizeof(*details));
800 		ntc++;
801 		if (ntc == sq->count)
802 			ntc = 0;
803 		desc = ICE_CTL_Q_DESC(*sq, ntc);
804 		details = ICE_CTL_Q_DETAILS(*sq, ntc);
805 	}
806 
807 	sq->next_to_clean = ntc;
808 
809 	return ICE_CTL_Q_DESC_UNUSED(sq);
810 }
811 
812 /**
813  * ice_debug_cq - dump a control queue command descriptor and buffer
814  * @hw: pointer to the hardware structure
815  * @desc: pointer to control queue descriptor
816  * @buf: pointer to command buffer
817  * @buf_len: max length of buf
818  *
819  * Dumps debug log about control command with descriptor contents.
820  */
821 static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
822 {
823 	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
824 	u16 len;
825 
826 	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
827 	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
828 		return;
829 
830 	if (!desc)
831 		return;
832 
833 	len = le16_to_cpu(cq_desc->datalen);
834 
835 	ice_debug(hw, ICE_DBG_AQ_DESC,
836 		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
837 		  le16_to_cpu(cq_desc->opcode),
838 		  le16_to_cpu(cq_desc->flags),
839 		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
840 	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
841 		  le32_to_cpu(cq_desc->cookie_high),
842 		  le32_to_cpu(cq_desc->cookie_low));
843 	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
844 		  le32_to_cpu(cq_desc->params.generic.param0),
845 		  le32_to_cpu(cq_desc->params.generic.param1));
846 	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
847 		  le32_to_cpu(cq_desc->params.generic.addr_high),
848 		  le32_to_cpu(cq_desc->params.generic.addr_low));
849 	if (buf && cq_desc->datalen != 0) {
850 		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
851 		if (buf_len < len)
852 			len = buf_len;
853 
854 		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
855 	}
856 }
857 
858 /**
859  * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
860  * @hw: pointer to the HW struct
861  * @cq: pointer to the specific Control queue
862  *
863  * Returns true if the firmware has processed all descriptors on the
864  * admin send queue. Returns false if there are still requests pending.
865  */
866 static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
867 {
868 	/* AQ designers suggest using the head register for better
869 	 * timing reliability than the DD bit
870 	 */
871 	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
872 }
873 
874 /**
875  * ice_sq_send_cmd - send command to Control Queue (ATQ)
876  * @hw: pointer to the HW struct
877  * @cq: pointer to the specific Control queue
878  * @desc: prefilled descriptor describing the command (non DMA mem)
879  * @buf: buffer to use for indirect commands (or NULL for direct commands)
880  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
881  * @cd: pointer to command details structure
882  *
883  * This is the main send command routine for the ATQ: it cleans the queue,
884  * posts the descriptor and optional buffer, and polls firmware for completion.
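 *
 * A minimal usage sketch for a direct (bufferless) command; the opcode
 * ice_aqc_opc_get_ver is assumed to come from the AdminQ command
 * definitions rather than this file:
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);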
885  */
886 enum ice_status
887 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
888 		struct ice_aq_desc *desc, void *buf, u16 buf_size,
889 		struct ice_sq_cd *cd)
890 {
891 	struct ice_dma_mem *dma_buf = NULL;
892 	struct ice_aq_desc *desc_on_ring;
893 	bool cmd_completed = false;
894 	enum ice_status status = 0;
895 	struct ice_sq_cd *details;
896 	u32 total_delay = 0;
897 	u16 retval = 0;
898 	u32 val = 0;
899 
900 	/* if reset is in progress return a soft error */
901 	if (hw->reset_ongoing)
902 		return ICE_ERR_RESET_ONGOING;
903 	mutex_lock(&cq->sq_lock);
904 
905 	cq->sq_last_status = ICE_AQ_RC_OK;
906 
907 	if (!cq->sq.count) {
908 		ice_debug(hw, ICE_DBG_AQ_MSG,
909 			  "Control Send queue not initialized.\n");
910 		status = ICE_ERR_AQ_EMPTY;
911 		goto sq_send_command_error;
912 	}
913 
914 	if ((buf && !buf_size) || (!buf && buf_size)) {
915 		status = ICE_ERR_PARAM;
916 		goto sq_send_command_error;
917 	}
918 
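	/* Indirect command: flag the descriptor as carrying a buffer and, for
	 * buffers larger than ICE_AQ_LG_BUF, set the large-buffer flag too.
	 */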
919 	if (buf) {
920 		if (buf_size > cq->sq_buf_size) {
921 			ice_debug(hw, ICE_DBG_AQ_MSG,
922 				  "Invalid buffer size for Control Send queue: %d.\n",
923 				  buf_size);
924 			status = ICE_ERR_INVAL_SIZE;
925 			goto sq_send_command_error;
926 		}
927 
928 		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
929 		if (buf_size > ICE_AQ_LG_BUF)
930 			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
931 	}
932 
933 	val = rd32(hw, cq->sq.head);
934 	if (val >= cq->num_sq_entries) {
935 		ice_debug(hw, ICE_DBG_AQ_MSG,
936 			  "head overrun at %d in the Control Send Queue ring\n",
937 			  val);
938 		status = ICE_ERR_AQ_EMPTY;
939 		goto sq_send_command_error;
940 	}
941 
942 	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
943 	if (cd)
944 		*details = *cd;
945 	else
946 		memset(details, 0, sizeof(*details));
947 
948 	/* Call clean and check queue available function to reclaim the
949 	 * descriptors that were processed by FW/MBX; the function returns the
950 	 * number of desc available. The clean function called here could be
951 	 * called in a separate thread in case of asynchronous completions.
952 	 */
953 	if (ice_clean_sq(hw, cq) == 0) {
954 		ice_debug(hw, ICE_DBG_AQ_MSG,
955 			  "Error: Control Send Queue is full.\n");
956 		status = ICE_ERR_AQ_FULL;
957 		goto sq_send_command_error;
958 	}
959 
960 	/* initialize the temp desc pointer with the right desc */
961 	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
962 
963 	/* if the desc is available copy the temp desc to the right place */
964 	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));
965 
966 	/* if buf is not NULL assume indirect command */
967 	if (buf) {
968 		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
969 		/* copy the user buf into the respective DMA buf */
970 		memcpy(dma_buf->va, buf, buf_size);
971 		desc_on_ring->datalen = cpu_to_le16(buf_size);
972 
973 		/* Update the address values in the desc with the pa value
974 		 * for respective buffer
975 		 */
976 		desc_on_ring->params.generic.addr_high =
977 			cpu_to_le32(upper_32_bits(dma_buf->pa));
978 		desc_on_ring->params.generic.addr_low =
979 			cpu_to_le32(lower_32_bits(dma_buf->pa));
980 	}
981 
982 	/* Debug desc and buffer */
983 	ice_debug(hw, ICE_DBG_AQ_DESC,
984 		  "ATQ: Control Send queue desc and buffer:\n");
985 
986 	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
987 
988 	(cq->sq.next_to_use)++;
989 	if (cq->sq.next_to_use == cq->sq.count)
990 		cq->sq.next_to_use = 0;
991 	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
992 
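	/* Poll for completion: firmware advances the SQ head past this
	 * descriptor once the command has been processed (see ice_sq_done()).
	 */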
993 	do {
994 		if (ice_sq_done(hw, cq))
995 			break;
996 
997 		udelay(ICE_CTL_Q_SQ_CMD_USEC);
998 		total_delay++;
999 	} while (total_delay < cq->sq_cmd_timeout);
1000 
1001 	/* if ready, copy the desc back to temp */
1002 	if (ice_sq_done(hw, cq)) {
1003 		memcpy(desc, desc_on_ring, sizeof(*desc));
1004 		if (buf) {
1005 			/* get returned length to copy */
1006 			u16 copy_size = le16_to_cpu(desc->datalen);
1007 
1008 			if (copy_size > buf_size) {
1009 				ice_debug(hw, ICE_DBG_AQ_MSG,
1010 					  "Return len %d > than buf len %d\n",
1011 					  copy_size, buf_size);
1012 				status = ICE_ERR_AQ_ERROR;
1013 			} else {
1014 				memcpy(buf, dma_buf->va, copy_size);
1015 			}
1016 		}
1017 		retval = le16_to_cpu(desc->retval);
1018 		if (retval) {
1019 			ice_debug(hw, ICE_DBG_AQ_MSG,
1020 				  "Control Send Queue command 0x%04X completed with error 0x%X\n",
1021 				  le16_to_cpu(desc->opcode),
1022 				  retval);
1023 
1024 			/* strip off FW internal code */
1025 			retval &= 0xff;
1026 		}
1027 		cmd_completed = true;
1028 		if (!status && retval != ICE_AQ_RC_OK)
1029 			status = ICE_ERR_AQ_ERROR;
1030 		cq->sq_last_status = (enum ice_aq_err)retval;
1031 	}
1032 
1033 	ice_debug(hw, ICE_DBG_AQ_MSG,
1034 		  "ATQ: desc and buffer writeback:\n");
1035 
1036 	ice_debug_cq(hw, (void *)desc, buf, buf_size);
1037 
1038 	/* save the writeback descriptor if requested */
1039 	if (details->wb_desc)
1040 		memcpy(details->wb_desc, desc_on_ring,
1041 		       sizeof(*details->wb_desc));
1042 
1043 	/* update the error if a timeout occurred */
1044 	if (!cmd_completed) {
1045 		ice_debug(hw, ICE_DBG_AQ_MSG,
1046 			  "Control Send Queue Writeback timeout.\n");
1047 		status = ICE_ERR_AQ_TIMEOUT;
1048 	}
1049 
1050 sq_send_command_error:
1051 	mutex_unlock(&cq->sq_lock);
1052 	return status;
1053 }
1054 
1055 /**
1056  * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
1057  * @desc: pointer to the temp descriptor (non DMA mem)
1058  * @opcode: the opcode can be used to decide which flags to turn off or on
1059  *
1060  * Fill the desc with default values
1061  */
1062 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
1063 {
1064 	/* zero out the desc */
1065 	memset(desc, 0, sizeof(*desc));
1066 	desc->opcode = cpu_to_le16(opcode);
1067 	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
1068 }
1069 
1070 /**
1071  * ice_clean_rq_elem - clean one Admin Receive Queue element
1072  * @hw: pointer to the HW struct
1073  * @cq: pointer to the specific Control queue
1074  * @e: event info from the receive descriptor, includes any buffers
1075  * @pending: number of events that could be left to process
1076  *
1077  * This function cleans one Admin Receive Queue element and returns
1078  * the contents through e. It can also return how many events are
1079  * left to process through 'pending'.
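 *
 * A minimal caller sketch; the event buffer management shown here is an
 * assumption, not taken from this file:
 *
 *	struct ice_rq_event_info event = { };
 *	u16 pending = 0;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		(handle event.desc.opcode and event.msg_buf here)
 *	} while (pending);
 *	kfree(event.msg_buf);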
1080  */
1081 enum ice_status
1082 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1083 		  struct ice_rq_event_info *e, u16 *pending)
1084 {
1085 	u16 ntc = cq->rq.next_to_clean;
1086 	enum ice_status ret_code = 0;
1087 	struct ice_aq_desc *desc;
1088 	struct ice_dma_mem *bi;
1089 	u16 desc_idx;
1090 	u16 datalen;
1091 	u16 flags;
1092 	u16 ntu;
1093 
1094 	/* pre-clean the event info */
1095 	memset(&e->desc, 0, sizeof(e->desc));
1096 
1097 	/* take the lock before we start messing with the ring */
1098 	mutex_lock(&cq->rq_lock);
1099 
1100 	if (!cq->rq.count) {
1101 		ice_debug(hw, ICE_DBG_AQ_MSG,
1102 			  "Control Receive queue not initialized.\n");
1103 		ret_code = ICE_ERR_AQ_EMPTY;
1104 		goto clean_rq_elem_err;
1105 	}
1106 
1107 	/* set next_to_use to head */
1108 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1109 
1110 	if (ntu == ntc) {
1111 		/* nothing to do - shouldn't need to update ring's values */
1112 		ret_code = ICE_ERR_AQ_NO_WORK;
1113 		goto clean_rq_elem_out;
1114 	}
1115 
1116 	/* now clean the next descriptor */
1117 	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
1118 	desc_idx = ntc;
1119 
1120 	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
1121 	flags = le16_to_cpu(desc->flags);
1122 	if (flags & ICE_AQ_FLAG_ERR) {
1123 		ret_code = ICE_ERR_AQ_ERROR;
1124 		ice_debug(hw, ICE_DBG_AQ_MSG,
1125 			  "Control Receive Queue Event 0x%04X received with error 0x%X\n",
1126 			  le16_to_cpu(desc->opcode),
1127 			  cq->rq_last_status);
1128 	}
1129 	memcpy(&e->desc, desc, sizeof(e->desc));
1130 	datalen = le16_to_cpu(desc->datalen);
1131 	e->msg_len = min(datalen, e->buf_len);
1132 	if (e->msg_buf && e->msg_len)
1133 		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
1134 
1135 	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
1136 
1137 	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
1138 
1139 	/* Restore the original datalen and buffer address in the desc;
1140 	 * FW updates datalen to indicate the event message size
1141 	 */
1142 	bi = &cq->rq.r.rq_bi[ntc];
1143 	memset(desc, 0, sizeof(*desc));
1144 
1145 	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
1146 	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
1147 		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
1148 	desc->datalen = cpu_to_le16(bi->size);
1149 	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
1150 	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
1151 
1152 	/* set tail = the last cleaned desc index. */
1153 	wr32(hw, cq->rq.tail, ntc);
1154 	/* ntc is updated to tail + 1 */
1155 	ntc++;
1156 	if (ntc == cq->num_rq_entries)
1157 		ntc = 0;
1158 	cq->rq.next_to_clean = ntc;
1159 	cq->rq.next_to_use = ntu;
1160 
1161 clean_rq_elem_out:
1162 	/* Set pending if needed, unlock and return */
1163 	if (pending) {
1164 		/* re-read HW head to calculate actual pending messages */
1165 		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1166 		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1167 	}
1168 clean_rq_elem_err:
1169 	mutex_unlock(&cq->rq_lock);
1170 
1171 	return ret_code;
1172 }
1173