/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
 * @desc: API request descriptor
 **/
static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
		desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}
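
/*
 * Illustrative sketch (not part of the driver): because the register
 * offsets are captured in hw->aq at init time, the rest of this file can
 * program either the PF or the VF queue registers through a single code
 * path.  A hypothetical helper that rings the send-queue doorbell would
 * simply be:
 *
 *	static void example_ring_asq_doorbell(struct i40e_hw *hw)
 *	{
 *		// hw->aq.asq.tail is I40E_PF_ATQT or I40E_VF_ATQT1,
 *		// depending on what i40e_adminq_init_regs() selected.
 *		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
 *	}
 */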

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design; there is
		 * no register for buffer size configuration.
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue.
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
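
/*
 * Worked example (illustrative): the write of tail = num_arq_entries - 1
 * above publishes the buffers pre-posted by i40e_alloc_arq_bufs() to
 * firmware.  As events arrive, firmware advances head;
 * i40e_clean_arq_element() below consumes the descriptor at
 * next_to_clean, then writes that index back to tail, recycling the
 * buffer to firmware one slot at a time.
 */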

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this function; the memory allocation
 *  routines called here are not safe to call from atomic context.
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}
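
/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	hw->aq.num_asq_entries = 128;	// ring depth chosen by the driver
 *	hw->aq.asq_buf_size = 512;	// per-command buffer size in bytes
 *	if (i40e_init_asq(hw) != I40E_SUCCESS)
 *		// handle allocation or register-config failure
 *
 * The values shown are arbitrary; real drivers pick sizes that fit their
 * command mix, within the device's AdminQ limits.
 */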

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this function; the memory allocation
 *  routines called here are not safe to call from atomic context.
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* take the lock first so a concurrent command cannot race shutdown */
	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* take the lock first so a concurrent user cannot race shutdown */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	u16 eetrack_lo, eetrack_hi;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize spin locks */
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->aq.nvm_release_on_done = FALSE;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	/* Request the default HMC resource profile.  The result is
	 * deliberately discarded; AdminQ initialization does not depend
	 * on it.
	 */
	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
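
/*
 * Usage sketch (hypothetical attach path, for illustration only):
 *
 *	hw->aq.num_asq_entries = I40E_AQ_LEN;	// driver-chosen ring depths
 *	hw->aq.num_arq_entries = I40E_AQ_LEN;
 *	hw->aq.asq_buf_size = I40E_AQ_BUF_SZ;
 *	hw->aq.arq_buf_size = I40E_AQ_BUF_SZ;
 *	if (i40e_init_adminq(hw) != I40E_SUCCESS)
 *		// fail the attach; no AQ communication is possible
 *
 * I40E_AQ_LEN and I40E_AQ_BUF_SZ stand in for whatever constants the
 * calling driver defines; they are not provided by this file.
 */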

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, TRUE);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the spinlocks */
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
			   rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc,
				    sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
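
/*
 * Worked example (illustrative): the free-descriptor count follows the
 * usual one-slot-reserved ring convention.  With count == 32,
 * next_to_clean == 10 and next_to_use == 14, four descriptors are still
 * in flight, so 32 - 4 - 1 == 27 are reported free; the reserved slot is
 * what lets a full ring be told apart from an empty one.
 */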

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns TRUE if the firmware has processed all descriptors on the
 *  admin send queue. Returns FALSE if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16  retval = 0;
	u32  val = 0;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag.\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* Call the clean function to reclaim the descriptors that were
	 * processed by FW; it returns the number of descriptors now
	 * available.  With asynchronous completions this cleanup could
	 * also run in a separate thread.
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
asq_send_command_exit:
	return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
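
/*
 * Usage sketch (illustrative; the opcode choice and error handling are
 * placeholders, not a prescription):
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc,
 *					  i40e_aqc_opc_queue_shutdown);
 *	if (i40e_asq_send_command(hw, &desc, NULL, 0, NULL) != I40E_SUCCESS)
 *		// inspect hw->aq.asq_last_status for the AQ return code
 *
 * Passing a non-NULL buff/buff_size instead turns this into an indirect
 * command: the payload is copied into the ring slot's DMA buffer and the
 * descriptor's address fields are pointed at it.
 */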

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	if (i40e_is_nvm_update_op(&e->desc)) {
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = FALSE;
		}
	}

	return ret_code;
}
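
/*
 * Usage sketch (hypothetical interrupt/poll handler, for illustration):
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending;
 *
 *	event.buf_len = hw->aq.arq_buf_size;	// caller supplies the buffer
 *	event.msg_buf = some_preallocated_buffer;
 *	do {
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;	// I40E_ERR_ADMIN_QUEUE_NO_WORK, or an error
 *		// dispatch on LE16_TO_CPU(event.desc.opcode) here
 *	} while (pending);
 *
 * "some_preallocated_buffer" is a stand-in for storage the caller owns;
 * this file does not allocate it.
 */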

/**
 *  i40e_resume_aq - resume AQ processing from idle
 *  @hw: pointer to the hardware structure
 *
 *  Reinitializes the send and receive queue indices and re-programs the
 *  queue registers, which are reset after a PF reset.
 **/
void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
#endif
	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}