xref: /freebsd/sys/dev/iavf/iavf_adminq.c (revision aa1a8ff2d6dbc51ef058f46f3db5a8bb77967145)
/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2021, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
	/* set head and tail registers in our local struct */
	hw->aq.asq.tail = IAVF_VF_ATQT1;
	hw->aq.asq.head = IAVF_VF_ATQH1;
	hw->aq.asq.len  = IAVF_VF_ATQLEN1;
	hw->aq.asq.bal  = IAVF_VF_ATQBAL1;
	hw->aq.asq.bah  = IAVF_VF_ATQBAH1;
	hw->aq.arq.tail = IAVF_VF_ARQT1;
	hw->aq.arq.head = IAVF_VF_ARQH1;
	hw->aq.arq.len  = IAVF_VF_ARQLEN1;
	hw->aq.arq.bal  = IAVF_VF_ARQBAL1;
	hw->aq.arq.bah  = IAVF_VF_ARQBAH1;
}

/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 iavf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 iavf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
	enum iavf_status ret_code;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	int i;

	/* Allocate the buffer info memory first, then the mapped buffers
	 * used for event processing.
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design; there is
		 * no register for buffer size configuration.
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
	enum iavf_status ret_code;
	struct iavf_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this function, as the memory
 *  allocation routines it calls are not atomic-context safe.
 **/
enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_asq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_asq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_asq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_config_regs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_asq(hw);
	return ret_code;

init_config_regs:
	iavf_free_asq_bufs(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this function, as the memory
 *  allocation routines it calls are not atomic-context safe.
 **/
enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_arq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_arq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_arq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_asq_bufs(hw);

shutdown_asq_out:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_arq_bufs(hw);

shutdown_arq_out:
	iavf_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}
	iavf_init_spinlock(&hw->aq.asq_spinlock);
	iavf_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	iavf_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = iavf_init_asq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = iavf_init_arq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}

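/*
 * Usage sketch (illustrative only, not part of the driver): a caller is
 * expected to size both queues before calling iavf_init_adminq() and to pair
 * it with iavf_shutdown_adminq() on detach.  The entry counts and buffer
 * sizes below are hypothetical example values, not requirements.
 *
 *	hw->aq.num_asq_entries = 256;
 *	hw->aq.num_arq_entries = 256;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	if (iavf_init_adminq(hw) != IAVF_SUCCESS)
 *		return (EIO);
 *	...
 *	iavf_shutdown_adminq(hw);
 */
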
/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	iavf_shutdown_asq(hw);
	iavf_shutdown_arq(hw);
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

/**
 *  iavf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Returns the number of free descriptors.
 **/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &(hw->aq.asq);
	struct iavf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct iavf_aq_desc desc_cb;
	struct iavf_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			IAVF_ADMINQ_CALLBACK cb_func =
					(IAVF_ADMINQ_CALLBACK)details->callback;
			iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
				    IAVF_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
		iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}

/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue. It runs the queue, cleans the queue, etc.
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
				struct iavf_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct iavf_asq_cmd_details *cmd_details)
{
	enum iavf_status status = IAVF_SUCCESS;
	struct iavf_dma_mem *dma_buff = NULL;
	struct iavf_asq_cmd_details *details;
	struct iavf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = IAVF_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		iavf_memcpy(details,
			    cmd_details,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined, copy the cookie. The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW; it is only used by the driver.
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
		}
	} else {
		iavf_memset(details, 0,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = IAVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = IAVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call the clean and check queue available function to reclaim the
	 * descriptors that were processed by FW; it returns the number of
	 * descriptors available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (iavf_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = IAVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		iavf_memcpy(dma_buff->va, buff, buff_size,
			    IAVF_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			iavf_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
			    IAVF_DMA_TO_NONDMA);
		if (buff != NULL)
			iavf_memcpy(buff, dma_buff->va, buff_size,
				    IAVF_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
			status = IAVF_SUCCESS;
		else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
			status = IAVF_ERR_NOT_READY;
		else
			status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		iavf_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}

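/*
 * Usage sketch (illustrative only): a direct command is typically built with
 * iavf_fill_default_direct_cmd_desc() and handed to iavf_asq_send_command(),
 * which, with NULL cmd_details, waits for the firmware to write the
 * descriptor back or for the command to time out.  The opcode below is only
 * an example of the pattern.
 *
 *	struct iavf_aq_desc desc;
 *	enum iavf_status status;
 *
 *	iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
 *	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	if (status != IAVF_SUCCESS)
 *		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
 *			   "AQ command failed, aq_err %d\n",
 *			   hw->aq.asq_last_status);
 */
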
/**
 *  iavf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
					     struct iavf_arq_event_info *e,
					     u16 *pending)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = IAVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & IAVF_AQ_FLAG_ERR) {
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
		    IAVF_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		iavf_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, IAVF_DMA_TO_NONDMA);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * FW updates datalen to indicate the event message size.
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	iavf_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

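/*
 * Usage sketch (illustrative only): callers typically drain the ARQ in a
 * loop, pointing msg_buf at a caller-allocated buffer ('buf' below is
 * hypothetical) and stopping once IAVF_ERR_ADMIN_QUEUE_NO_WORK is returned
 * or 'pending' drops to zero.  The buffer size is an example value.
 *
 *	struct iavf_arq_event_info event;
 *	u16 pending = 0;
 *
 *	event.buf_len = 4096;
 *	event.msg_buf = buf;
 *	do {
 *		if (iavf_clean_arq_element(hw, &event, &pending))
 *			break;
 *		process_event(&event);
 *	} while (pending > 0);
 */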