/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This only records the register offsets in the local struct; it is safe
 *  to call before the send and receive queues have been allocated
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}
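
/*
 * Editorial note: the offsets recorded above are consumed by the register
 * accessors used throughout this file, so the same code paths drive either
 * the PF or the VF register set.  A sketch mirroring calls made later in
 * this file:
 *
 *	wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
 *	head = rd32(hw, hw->aq.asq.head);
 */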

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design; there is
		 * no register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
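
/*
 * Editorial note: each pre-posted descriptor above carries the 64-bit DMA
 * address of its buffer split across two little-endian 32-bit fields.  A
 * worked example with a hypothetical bi->pa of 0x0000001234567000:
 *
 *	I40E_HI_DWORD(bi->pa) == 0x00000012
 *	I40E_LO_DWORD(bi->pa) == 0x34567000
 *
 * I40E_AQ_FLAG_LB marks buffers larger than I40E_AQ_LARGE_BUF (512 bytes)
 * so the firmware treats them as large buffers.
 */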

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
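
/*
 * Editorial note: the ATQLEN/ARQLEN registers combine the ring size with
 * control bits, which is why the enable mask is OR'd into the entry count
 * above.  A sketch with a hypothetical 32-entry PF send queue:
 *
 *	wr32(hw, hw->aq.asq.len, 32 | I40E_PF_ATQLEN_ATQENABLE_MASK);
 *
 * Writing 0 to the same register, as the shutdown paths below do, disables
 * the queue and zeroes its length in one store.
 */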

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_config_regs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);
	return ret_code;

init_config_regs:
	i40e_free_asq_bufs(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}
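
/*
 * Editorial sketch: a caller is expected to size the ARQ before
 * i40e_init_arq() runs; the values below are hypothetical.  Note that on
 * failure the function has already unwound its own allocations:
 *
 *	hw->aq.num_arq_entries = 256;
 *	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *	if (i40e_init_arq(hw) != I40E_SUCCESS)
 *		;  // report the error; no extra cleanup is required
 */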

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}

/**
 *  i40e_set_hw_flags - set HW flags
 *  @hw: pointer to the hardware structure
 **/
static void i40e_set_hw_flags(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;

	hw->flags = 0;

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
			/* The ability to RX (not drop) 802.1ad frames */
			hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
		}
		break;
	case I40E_MAC_X722:
		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
			hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;

		/* fall through */
	default:
		break;
	}

	/* Newer versions of firmware require lock when reading the NVM */
	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 5))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 8)) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
		hw->flags |= I40E_HW_FLAG_DROP_MODE;
	}

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 9))
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
}
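
/*
 * Editorial note: every gate above is the same "API version is at least
 * major.minor" test.  A hypothetical helper (not part of the driver) makes
 * the pattern explicit:
 *
 *	static inline bool
 *	i40e_aq_ver_ge(struct i40e_adminq_info *aq, u16 maj, u16 min)
 *	{
 *		return (aq->api_maj_ver > maj ||
 *			(aq->api_maj_ver == maj && aq->api_min_ver >= min));
 *	}
 *
 * With it, the NVM-lock gate above would read i40e_aq_ver_ge(aq, 1, 5).
 */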

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;
	enum i40e_status_code ret_code;
	u16 oem_hi = 0, oem_lo = 0;
	u16 eetrack_hi = 0;
	u16 eetrack_lo = 0;
	u16 cfg_ptr = 0;
	int retry = 0;

	/* verify input for valid configuration */
	if (aq->num_arq_entries == 0 ||
	    aq->num_asq_entries == 0 ||
	    aq->arq_buf_size == 0 ||
	    aq->asq_buf_size == 0) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&aq->asq_spinlock);
	i40e_init_spinlock(&aq->arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&aq->fw_maj_ver,
							&aq->fw_min_ver,
							&aq->fw_build,
							&aq->api_maj_ver,
							&aq->api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/*
	 * Some features were introduced in different FW API versions
	 * for different MAC types.
	 */
	i40e_set_hw_flags(hw);

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = FALSE;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&aq->asq_spinlock);
	i40e_destroy_spinlock(&aq->arq_spinlock);

init_adminq_exit:
	return ret_code;
}
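
/*
 * Editorial sketch: the overall AdminQ bring-up contract, with hypothetical
 * sizing values.  On success both queues are live and the firmware and API
 * versions have been cached in hw->aq:
 *
 *	hw->aq.num_asq_entries = 256;
 *	hw->aq.num_arq_entries = 256;
 *	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *	status = i40e_init_adminq(hw);
 *	if (status == I40E_ERR_FIRMWARE_API_VERSION)
 *		;  // firmware speaks a newer API major than this driver
 */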

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, TRUE);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
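
/*
 * Editorial note: I40E_DESC_UNUSED() (defined in i40e_adminq.h) keeps one
 * slot open so a full ring can be told apart from an empty one.  Worked
 * example on a hypothetical 32-entry ASQ with next_to_use = 10 and
 * next_to_clean = 4:
 *
 *	unused = 32 + 4 - 10 - 1 = 25
 *
 * A return of 0 from i40e_clean_asq() therefore means the ring is full.
 */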

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns TRUE if the firmware has processed all descriptors on the
 *  admin send queue. Returns FALSE if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW; the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = I40E_ERR_NOT_READY;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save the writeback descriptor if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
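
/*
 * Editorial sketch: a typical direct (buffer-less) command built with the
 * helper above and pushed through i40e_asq_send_command(); error handling
 * is elided:
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * Indirect commands additionally pass a buffer and its size, and the send
 * path copies the buffer into the ring's DMA memory as shown above.
 */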

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	if (!i40e_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * the FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
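
/*
 * Editorial sketch: a hypothetical polling loop draining the ARQ one
 * element at a time until no work remains ('buf' is caller-supplied):
 *
 *	struct i40e_arq_event_info e;
 *	u16 pending;
 *
 *	e.buf_len = hw->aq.arq_buf_size;
 *	e.msg_buf = buf;
 *	do {
 *		if (i40e_clean_arq_element(hw, &e, &pending) ==
 *		    I40E_ERR_ADMIN_QUEUE_NO_WORK)
 *			break;
 *		// dispatch on LE16_TO_CPU(e.desc.opcode)
 *	} while (pending);
 *
 * The pending arithmetic wraps around the ring: with count = 64, ntc = 62
 * and ntu = 1 it yields 64 + (1 - 62) = 3 events still queued.
 */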