/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design; there
		 * is no register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive queue
 *  (event queue)
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_config_regs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);
	return ret_code;

init_config_regs:
	i40e_free_asq_bufs(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}

/**
 *  i40e_set_hw_flags - set HW flags
 *  @hw: pointer to the hardware structure
 **/
static void i40e_set_hw_flags(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;

	hw->flags = 0;

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
			/* The ability to RX (not drop) 802.1ad frames */
			hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
		}
		break;
	case I40E_MAC_X722:
		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
			hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;

		/* fall through */
	default:
		break;
	}

	/* Newer versions of firmware require lock when reading the NVM */
	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 5))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 8)) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
		hw->flags |= I40E_HW_FLAG_DROP_MODE;
	}

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 9))
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;
	enum i40e_status_code ret_code;
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	int retry = 0;

	/* verify input for valid configuration */
	if (aq->num_arq_entries == 0 ||
	    aq->num_asq_entries == 0 ||
	    aq->arq_buf_size == 0 ||
	    aq->asq_buf_size == 0) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&aq->asq_spinlock);
	i40e_init_spinlock(&aq->arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&aq->fw_maj_ver,
							&aq->fw_min_ver,
							&aq->fw_build,
							&aq->api_maj_ver,
							&aq->api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/*
	 * Some features were introduced in different FW API versions
	 * for different MAC types.
	 */
	i40e_set_hw_flags(hw);

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = FALSE;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&aq->asq_spinlock);
	i40e_destroy_spinlock(&aq->arq_spinlock);

init_adminq_exit:
	return ret_code;
}
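
/*
 * Illustrative caller setup (a sketch, not part of the driver proper):
 * before i40e_init_adminq() can run, the attaching driver fills in the
 * queue sizing fields noted in the kernel-doc above.  The entry counts
 * and buffer sizes below are example values, not requirements.
 *
 *	hw->aq.num_asq_entries = 256;
 *	hw->aq.num_arq_entries = 256;
 *	hw->aq.asq_buf_size = 512;
 *	hw->aq.arq_buf_size = 512;
 *	if (i40e_init_adminq(hw) != I40E_SUCCESS) {
 *		(bail out; on failure all AQ resources have already been
 *		 released via the unwind labels above)
 *	}
 */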

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, TRUE);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
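
/*
 * A note on the return value: I40E_DESC_UNUSED() (defined in
 * i40e_adminq.h) computes the number of free ring slots, allowing for
 * wraparound, and keeps one slot unused so a full ring can be told apart
 * from an empty one.  A worked example with illustrative values: on a
 * 32-entry ring with next_to_clean = 5 and next_to_use = 30, the count
 * is 32 + 5 - 30 - 1 = 6 descriptors still available.
 */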

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns TRUE if the firmware has processed all descriptors on the
 *  admin send queue. Returns FALSE if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = I40E_ERR_NOT_READY;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
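
/*
 * Usage sketch (illustrative only): a direct (bufferless) command pairs
 * this helper with i40e_asq_send_command().  The opcode below is just an
 * example.
 *
 *	struct i40e_aq_desc desc;
 *	enum i40e_status_code status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	if (status != I40E_SUCCESS) {
 *		(on error, hw->aq.asq_last_status holds the AQ return code)
 *	}
 */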

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	if (!i40e_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
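
/*
 * Usage sketch (illustrative only): callers typically drain the ARQ in a
 * loop until no work remains.  'buf' stands in for a caller-allocated
 * buffer of at least hw->aq.arq_buf_size bytes.
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending = 0;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = buf;
 *	do {
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;	(no work left, or an error occurred)
 *		(dispatch on LE16_TO_CPU(event.desc.opcode) here)
 *	} while (pending);
 */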