xref: /linux/drivers/net/ethernet/intel/i40e/i40e_adminq.c (revision 48eb03dd26304c24f03bdbb9382e89c8564e71df)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include <linux/delay.h>
5 #include "i40e_alloc.h"
6 #include "i40e_register.h"
7 #include "i40e_prototype.h"
8 
9 static void i40e_resume_aq(struct i40e_hw *hw);
10 
11 /**
12  *  i40e_adminq_init_regs - Initialize AdminQ registers
13  *  @hw: pointer to the hardware structure
14  *
15  *  This assumes the alloc_asq and alloc_arq functions have already been called
16  **/
17 static void i40e_adminq_init_regs(struct i40e_hw *hw)
18 {
19 	/* set head and tail registers in our local struct */
20 	hw->aq.asq.tail = I40E_PF_ATQT;
21 	hw->aq.asq.head = I40E_PF_ATQH;
22 	hw->aq.asq.len  = I40E_PF_ATQLEN;
23 	hw->aq.asq.bal  = I40E_PF_ATQBAL;
24 	hw->aq.asq.bah  = I40E_PF_ATQBAH;
25 	hw->aq.arq.tail = I40E_PF_ARQT;
26 	hw->aq.arq.head = I40E_PF_ARQH;
27 	hw->aq.arq.len  = I40E_PF_ARQLEN;
28 	hw->aq.arq.bal  = I40E_PF_ARQBAL;
29 	hw->aq.arq.bah  = I40E_PF_ARQBAH;
30 }
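
/* Note: the offsets above are the PF-function copies of the AdminQ
 * registers.  The virtual-function driver programs its own VF AdminQ
 * register set; that code lives outside this file.
 */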
31 
32 /**
33  *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
34  *  @hw: pointer to the hardware structure
35  **/
36 static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
37 {
38 	int ret_code;
39 
40 	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
41 					 (hw->aq.num_asq_entries *
42 					 sizeof(struct i40e_aq_desc)),
43 					 I40E_ADMINQ_DESC_ALIGNMENT);
44 	if (ret_code)
45 		return ret_code;
46 
47 	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
48 					  (hw->aq.num_asq_entries *
49 					  sizeof(struct i40e_asq_cmd_details)));
50 	if (ret_code) {
51 		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
52 		return ret_code;
53 	}
54 
55 	return ret_code;
56 }
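
/* The send ring is really two parallel, per-entry arrays: a DMA-able array
 * of struct i40e_aq_desc (32 bytes each) that the hardware reads, and a
 * host-only array of struct i40e_asq_cmd_details used to track callbacks,
 * cookies and writeback copies for each command.
 */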
57 
58 /**
59  *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
60  *  @hw: pointer to the hardware structure
61  **/
62 static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
63 {
64 	int ret_code;
65 
66 	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
67 					 (hw->aq.num_arq_entries *
68 					 sizeof(struct i40e_aq_desc)),
69 					 I40E_ADMINQ_DESC_ALIGNMENT);
70 
71 	return ret_code;
72 }
73 
74 /**
75  *  i40e_free_adminq_asq - Free Admin Queue send rings
76  *  @hw: pointer to the hardware structure
77  *
78  *  This assumes the posted send buffers have already been cleaned
79  *  and de-allocated
80  **/
81 static void i40e_free_adminq_asq(struct i40e_hw *hw)
82 {
83 	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
84 }
85 
86 /**
87  *  i40e_free_adminq_arq - Free Admin Queue receive rings
88  *  @hw: pointer to the hardware structure
89  *
90  *  This assumes the posted receive buffers have already been cleaned
91  *  and de-allocated
92  **/
93 static void i40e_free_adminq_arq(struct i40e_hw *hw)
94 {
95 	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
96 }
97 
98 /**
99  *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
100  *  @hw: pointer to the hardware structure
101  **/
102 static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
103 {
104 	struct i40e_aq_desc *desc;
105 	struct i40e_dma_mem *bi;
106 	int ret_code;
107 	int i;
108 
109 	/* We'll be allocating the buffer info memory first, then we can
110 	 * allocate the mapped buffers for the event processing
111 	 */
112 
113 	/* buffer_info structures do not need alignment */
114 	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
115 		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
116 	if (ret_code)
117 		goto alloc_arq_bufs;
118 	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
119 
120 	/* allocate the mapped buffers */
121 	for (i = 0; i < hw->aq.num_arq_entries; i++) {
122 		bi = &hw->aq.arq.r.arq_bi[i];
123 		ret_code = i40e_allocate_dma_mem(hw, bi,
124 						 hw->aq.arq_buf_size,
125 						 I40E_ADMINQ_DESC_ALIGNMENT);
126 		if (ret_code)
127 			goto unwind_alloc_arq_bufs;
128 
129 		/* now configure the descriptors for use */
130 		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
131 
132 		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
133 		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
134 			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
135 		desc->opcode = 0;
136 		/* In accordance with the Admin Queue design, there is no
137 		 * register for buffer size configuration
138 		 */
139 		desc->datalen = cpu_to_le16((u16)bi->size);
140 		desc->retval = 0;
141 		desc->cookie_high = 0;
142 		desc->cookie_low = 0;
143 		desc->params.external.addr_high =
144 			cpu_to_le32(upper_32_bits(bi->pa));
145 		desc->params.external.addr_low =
146 			cpu_to_le32(lower_32_bits(bi->pa));
147 		desc->params.external.param0 = 0;
148 		desc->params.external.param1 = 0;
149 	}
150 
151 alloc_arq_bufs:
152 	return ret_code;
153 
154 unwind_alloc_arq_bufs:
155 	/* don't try to free the one that failed... */
156 	i--;
157 	for (; i >= 0; i--)
158 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
159 	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
160 
161 	return ret_code;
162 }
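
/* Unlike the send queue, each receive descriptor is pre-programmed above
 * with the address and size of its data buffer, so firmware can write
 * events as they occur; a descriptor is only re-armed after its event has
 * been consumed (see i40e_clean_arq_element()).
 */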
163 
164 /**
165  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
166  *  @hw: pointer to the hardware structure
167  **/
168 static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
169 {
170 	struct i40e_dma_mem *bi;
171 	int ret_code;
172 	int i;
173 
174 	/* No mapped memory needed yet, just the buffer info structures */
175 	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
176 		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
177 	if (ret_code)
178 		goto alloc_asq_bufs;
179 	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
180 
181 	/* allocate the mapped buffers */
182 	for (i = 0; i < hw->aq.num_asq_entries; i++) {
183 		bi = &hw->aq.asq.r.asq_bi[i];
184 		ret_code = i40e_allocate_dma_mem(hw, bi,
185 						 hw->aq.asq_buf_size,
186 						 I40E_ADMINQ_DESC_ALIGNMENT);
187 		if (ret_code)
188 			goto unwind_alloc_asq_bufs;
189 	}
190 alloc_asq_bufs:
191 	return ret_code;
192 
193 unwind_alloc_asq_bufs:
194 	/* don't try to free the one that failed... */
195 	i--;
196 	for (; i >= 0; i--)
197 		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
198 	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
199 
200 	return ret_code;
201 }
202 
203 /**
204  *  i40e_free_arq_bufs - Free receive queue buffer info elements
205  *  @hw: pointer to the hardware structure
206  **/
207 static void i40e_free_arq_bufs(struct i40e_hw *hw)
208 {
209 	int i;
210 
211 	/* free the posted receive buffers */
212 	for (i = 0; i < hw->aq.num_arq_entries; i++)
213 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
214 
215 	/* free the descriptor memory */
216 	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
217 
218 	/* free the dma header */
219 	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
220 }
221 
222 /**
223  *  i40e_free_asq_bufs - Free send queue buffer info elements
224  *  @hw: pointer to the hardware structure
225  **/
226 static void i40e_free_asq_bufs(struct i40e_hw *hw)
227 {
228 	int i;
229 
230 	/* only unmap if the address is non-NULL */
231 	for (i = 0; i < hw->aq.num_asq_entries; i++)
232 		if (hw->aq.asq.r.asq_bi[i].pa)
233 			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
234 
235 	/* free the buffer info list */
236 	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
237 
238 	/* free the descriptor memory */
239 	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
240 
241 	/* free the dma header */
242 	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
243 }
244 
245 /**
246  *  i40e_config_asq_regs - configure ASQ registers
247  *  @hw: pointer to the hardware structure
248  *
249  *  Configure base address and length registers for the transmit queue
250  **/
251 static int i40e_config_asq_regs(struct i40e_hw *hw)
252 {
253 	int ret_code = 0;
254 	u32 reg = 0;
255 
256 	/* Clear Head and Tail */
257 	wr32(hw, hw->aq.asq.head, 0);
258 	wr32(hw, hw->aq.asq.tail, 0);
259 
260 	/* set starting point */
261 	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
262 				  I40E_PF_ATQLEN_ATQENABLE_MASK));
263 	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
264 	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
265 
266 	/* Check one register to verify that config was applied */
267 	reg = rd32(hw, hw->aq.asq.bal);
268 	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
269 		ret_code = -EIO;
270 
271 	return ret_code;
272 }
273 
274 /**
275  *  i40e_config_arq_regs - ARQ register configuration
276  *  @hw: pointer to the hardware structure
277  *
278  *  Configure base address and length registers for the receive (event) queue
279  **/
280 static int i40e_config_arq_regs(struct i40e_hw *hw)
281 {
282 	int ret_code = 0;
283 	u32 reg = 0;
284 
285 	/* Clear Head and Tail */
286 	wr32(hw, hw->aq.arq.head, 0);
287 	wr32(hw, hw->aq.arq.tail, 0);
288 
289 	/* set starting point */
290 	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
291 				  I40E_PF_ARQLEN_ARQENABLE_MASK));
292 	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
293 	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
294 
295 	/* Update tail in the HW to post pre-allocated buffers */
296 	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
297 
298 	/* Check one register to verify that config was applied */
299 	reg = rd32(hw, hw->aq.arq.bal);
300 	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
301 		ret_code = -EIO;
302 
303 	return ret_code;
304 }
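
/* Writing tail = num_arq_entries - 1 above hands the pre-posted receive
 * buffers to firmware.  One slot is left unposted, in keeping with the
 * usual ring convention that head == tail denotes an empty ring.
 */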
305 
306 /**
307  *  i40e_init_asq - main initialization routine for ASQ
308  *  @hw: pointer to the hardware structure
309  *
310  *  This is the main initialization routine for the Admin Send Queue
311  *  Prior to calling this function, drivers *MUST* set the following fields
312  *  in the hw->aq structure:
313  *     - hw->aq.num_asq_entries
314  *     - hw->aq.asq_buf_size
315  *
316  *  Do *NOT* hold the lock when calling this, as the memory allocation
317  *  routines it calls are not safe to use in atomic context
318  **/
319 static int i40e_init_asq(struct i40e_hw *hw)
320 {
321 	int ret_code = 0;
322 
323 	if (hw->aq.asq.count > 0) {
324 		/* queue already initialized */
325 		ret_code = -EBUSY;
326 		goto init_adminq_exit;
327 	}
328 
329 	/* verify input for valid configuration */
330 	if ((hw->aq.num_asq_entries == 0) ||
331 	    (hw->aq.asq_buf_size == 0)) {
332 		ret_code = -EIO;
333 		goto init_adminq_exit;
334 	}
335 
336 	hw->aq.asq.next_to_use = 0;
337 	hw->aq.asq.next_to_clean = 0;
338 
339 	/* allocate the ring memory */
340 	ret_code = i40e_alloc_adminq_asq_ring(hw);
341 	if (ret_code)
342 		goto init_adminq_exit;
343 
344 	/* allocate buffers in the rings */
345 	ret_code = i40e_alloc_asq_bufs(hw);
346 	if (ret_code)
347 		goto init_adminq_free_rings;
348 
349 	/* initialize base registers */
350 	ret_code = i40e_config_asq_regs(hw);
351 	if (ret_code)
352 		goto init_adminq_free_rings;
353 
354 	/* success! */
355 	hw->aq.asq.count = hw->aq.num_asq_entries;
356 	goto init_adminq_exit;
357 
358 init_adminq_free_rings:
359 	i40e_free_adminq_asq(hw);
360 
361 init_adminq_exit:
362 	return ret_code;
363 }
364 
365 /**
366  *  i40e_init_arq - initialize ARQ
367  *  @hw: pointer to the hardware structure
368  *
369  *  The main initialization routine for the Admin Receive (Event) Queue.
370  *  Prior to calling this function, drivers *MUST* set the following fields
371  *  in the hw->aq structure:
372  *     - hw->aq.num_arq_entries
373  *     - hw->aq.arq_buf_size
374  *
375  *  Do *NOT* hold the lock when calling this, as the memory allocation
376  *  routines it calls are not safe to use in atomic context
377  **/
378 static int i40e_init_arq(struct i40e_hw *hw)
379 {
380 	int ret_code = 0;
381 
382 	if (hw->aq.arq.count > 0) {
383 		/* queue already initialized */
384 		ret_code = -EBUSY;
385 		goto init_adminq_exit;
386 	}
387 
388 	/* verify input for valid configuration */
389 	if ((hw->aq.num_arq_entries == 0) ||
390 	    (hw->aq.arq_buf_size == 0)) {
391 		ret_code = -EIO;
392 		goto init_adminq_exit;
393 	}
394 
395 	hw->aq.arq.next_to_use = 0;
396 	hw->aq.arq.next_to_clean = 0;
397 
398 	/* allocate the ring memory */
399 	ret_code = i40e_alloc_adminq_arq_ring(hw);
400 	if (ret_code)
401 		goto init_adminq_exit;
402 
403 	/* allocate buffers in the rings */
404 	ret_code = i40e_alloc_arq_bufs(hw);
405 	if (ret_code)
406 		goto init_adminq_free_rings;
407 
408 	/* initialize base registers */
409 	ret_code = i40e_config_arq_regs(hw);
410 	if (ret_code)
411 		goto init_adminq_free_rings;
412 
413 	/* success! */
414 	hw->aq.arq.count = hw->aq.num_arq_entries;
415 	goto init_adminq_exit;
416 
417 init_adminq_free_rings:
418 	i40e_free_adminq_arq(hw);
419 
420 init_adminq_exit:
421 	return ret_code;
422 }
423 
424 /**
425  *  i40e_shutdown_asq - shutdown the ASQ
426  *  @hw: pointer to the hardware structure
427  *
428  *  The main shutdown routine for the Admin Send Queue
429  **/
430 static int i40e_shutdown_asq(struct i40e_hw *hw)
431 {
432 	int ret_code = 0;
433 
434 	mutex_lock(&hw->aq.asq_mutex);
435 
436 	if (hw->aq.asq.count == 0) {
437 		ret_code = -EBUSY;
438 		goto shutdown_asq_out;
439 	}
440 
441 	/* Stop firmware AdminQ processing */
442 	wr32(hw, hw->aq.asq.head, 0);
443 	wr32(hw, hw->aq.asq.tail, 0);
444 	wr32(hw, hw->aq.asq.len, 0);
445 	wr32(hw, hw->aq.asq.bal, 0);
446 	wr32(hw, hw->aq.asq.bah, 0);
447 
448 	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
449 
450 	/* free ring buffers */
451 	i40e_free_asq_bufs(hw);
452 
453 shutdown_asq_out:
454 	mutex_unlock(&hw->aq.asq_mutex);
455 	return ret_code;
456 }
457 
458 /**
459  *  i40e_shutdown_arq - shutdown ARQ
460  *  @hw: pointer to the hardware structure
461  *
462  *  The main shutdown routine for the Admin Receive Queue
463  **/
464 static int i40e_shutdown_arq(struct i40e_hw *hw)
465 {
466 	int ret_code = 0;
467 
468 	mutex_lock(&hw->aq.arq_mutex);
469 
470 	if (hw->aq.arq.count == 0) {
471 		ret_code = -EBUSY;
472 		goto shutdown_arq_out;
473 	}
474 
475 	/* Stop firmware AdminQ processing */
476 	wr32(hw, hw->aq.arq.head, 0);
477 	wr32(hw, hw->aq.arq.tail, 0);
478 	wr32(hw, hw->aq.arq.len, 0);
479 	wr32(hw, hw->aq.arq.bal, 0);
480 	wr32(hw, hw->aq.arq.bah, 0);
481 
482 	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
483 
484 	/* free ring buffers */
485 	i40e_free_arq_bufs(hw);
486 
487 shutdown_arq_out:
488 	mutex_unlock(&hw->aq.arq_mutex);
489 	return ret_code;
490 }
491 
492 /**
493  *  i40e_set_hw_caps - set HW flags
494  *  @hw: pointer to the hardware structure
495  **/
496 static void i40e_set_hw_caps(struct i40e_hw *hw)
497 {
498 	bitmap_zero(hw->caps, I40E_HW_CAPS_NBITS);
499 
500 	switch (hw->mac.type) {
501 	case I40E_MAC_XL710:
502 		if (i40e_is_aq_api_ver_ge(hw, 1,
503 					  I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
504 			set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
505 			set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
506 			/* The ability to RX (not drop) 802.1ad frames */
507 			set_bit(I40E_HW_CAP_802_1AD, hw->caps);
508 		}
509 		if (i40e_is_aq_api_ver_ge(hw, 1, 5)) {
510 			/* Supported in FW API version higher than 1.4 */
511 			set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
512 		}
513 		if (i40e_is_fw_ver_lt(hw, 4, 33)) {
514 			set_bit(I40E_HW_CAP_RESTART_AUTONEG, hw->caps);
515 			/* No DCB support for FW < v4.33 */
516 			set_bit(I40E_HW_CAP_NO_DCB_SUPPORT, hw->caps);
517 		}
518 		if (i40e_is_fw_ver_lt(hw, 4, 3)) {
519 			/* Disable FW LLDP if FW < v4.3 */
520 			set_bit(I40E_HW_CAP_STOP_FW_LLDP, hw->caps);
521 		}
522 		if (i40e_is_fw_ver_ge(hw, 4, 40)) {
523 			/* Use the FW Set LLDP MIB API if FW >= v4.40 */
524 			set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
525 		}
526 		if (i40e_is_fw_ver_ge(hw, 6, 0)) {
527 			/* Enable PTP L4 if FW >= v6.0 */
528 			set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
529 		}
530 		break;
531 	case I40E_MAC_X722:
532 		set_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps);
533 		set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
534 		set_bit(I40E_HW_CAP_RSS_AQ, hw->caps);
535 		set_bit(I40E_HW_CAP_128_QP_RSS, hw->caps);
536 		set_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
537 		set_bit(I40E_HW_CAP_WB_ON_ITR, hw->caps);
538 		set_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, hw->caps);
539 		set_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, hw->caps);
540 		set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
541 		set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
542 		set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
543 		set_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, hw->caps);
544 		set_bit(I40E_HW_CAP_OUTER_UDP_CSUM, hw->caps);
545 
546 		if (rd32(hw, I40E_GLQF_FDEVICTENA(1)) !=
547 		    I40E_FDEVICT_PCTYPE_DEFAULT) {
548 			hw_warn(hw, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
549 			clear_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
550 		}
551 
552 		if (i40e_is_aq_api_ver_ge(hw, 1,
553 					  I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
554 			set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
555 
556 		if (i40e_is_aq_api_ver_ge(hw, 1,
557 					  I40E_MINOR_VER_GET_LINK_INFO_X722))
558 			set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
559 
560 		if (i40e_is_aq_api_ver_ge(hw, 1,
561 					  I40E_MINOR_VER_FW_REQUEST_FEC_X722))
562 			set_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps);
563 
564 		fallthrough;
565 	default:
566 		break;
567 	}
568 
569 	/* Newer versions of firmware require a lock when reading the NVM */
570 	if (i40e_is_aq_api_ver_ge(hw, 1, 5))
571 		set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
572 
573 	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
574 	if (i40e_is_aq_api_ver_ge(hw, 1, 7))
575 		set_bit(I40E_HW_CAP_802_1AD, hw->caps);
576 
577 	if (i40e_is_aq_api_ver_ge(hw, 1, 8))
578 		set_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps);
579 
580 	if (i40e_is_aq_api_ver_ge(hw, 1, 9))
581 		set_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps);
582 }
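
/* The i40e_is_aq_api_ver_ge()/i40e_is_fw_ver_ge()/i40e_is_fw_ver_lt()
 * helpers used above are simple (major, minor) comparisons against the
 * API and firmware versions stored in hw->aq, which are filled in by
 * i40e_aq_get_firmware_version() from i40e_init_adminq() below.
 */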
583 
584 /**
585  *  i40e_init_adminq - main initialization routine for Admin Queue
586  *  @hw: pointer to the hardware structure
587  *
588  *  Prior to calling this function, drivers *MUST* set the following fields
589  *  in the hw->aq structure:
590  *     - hw->aq.num_asq_entries
591  *     - hw->aq.num_arq_entries
592  *     - hw->aq.arq_buf_size
593  *     - hw->aq.asq_buf_size
594  **/
595 int i40e_init_adminq(struct i40e_hw *hw)
596 {
597 	u16 cfg_ptr, oem_hi, oem_lo;
598 	u16 eetrack_lo, eetrack_hi;
599 	int retry = 0;
600 	int ret_code;
601 
602 	/* verify input for valid configuration */
603 	if ((hw->aq.num_arq_entries == 0) ||
604 	    (hw->aq.num_asq_entries == 0) ||
605 	    (hw->aq.arq_buf_size == 0) ||
606 	    (hw->aq.asq_buf_size == 0)) {
607 		ret_code = -EIO;
608 		goto init_adminq_exit;
609 	}
610 
611 	/* Set up register offsets */
612 	i40e_adminq_init_regs(hw);
613 
614 	/* setup ASQ command write back timeout */
615 	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
616 
617 	/* allocate the ASQ */
618 	ret_code = i40e_init_asq(hw);
619 	if (ret_code)
620 		goto init_adminq_destroy_locks;
621 
622 	/* allocate the ARQ */
623 	ret_code = i40e_init_arq(hw);
624 	if (ret_code)
625 		goto init_adminq_free_asq;
626 
627 	/* There are some cases where the firmware may not be quite ready
628 	 * for AdminQ operations, so we retry the AdminQ setup a few times
629 	 * if we see timeouts in this first AQ call.
630 	 */
631 	do {
632 		ret_code = i40e_aq_get_firmware_version(hw,
633 							&hw->aq.fw_maj_ver,
634 							&hw->aq.fw_min_ver,
635 							&hw->aq.fw_build,
636 							&hw->aq.api_maj_ver,
637 							&hw->aq.api_min_ver,
638 							NULL);
639 		if (ret_code != -EIO)
640 			break;
641 		retry++;
642 		msleep(100);
643 		i40e_resume_aq(hw);
644 	} while (retry < 10);
645 	if (ret_code != 0)
646 		goto init_adminq_free_arq;
647 
648 	/* Some features were introduced in different FW API versions
649 	 * for different MAC types.
650 	 */
651 	i40e_set_hw_caps(hw);
652 
653 	/* get the NVM version info */
654 	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
655 			   &hw->nvm.version);
656 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
657 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
658 	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
659 	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
660 	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
661 			   &oem_hi);
662 	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
663 			   &oem_lo);
664 	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
665 
666 	if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR + 1, 0)) {
667 		ret_code = -EIO;
668 		goto init_adminq_free_arq;
669 	}
670 
671 	/* pre-emptive resource lock release */
672 	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
673 	hw->nvm_release_on_done = false;
674 	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
675 
676 	ret_code = 0;
677 
678 	/* success! */
679 	goto init_adminq_exit;
680 
681 init_adminq_free_arq:
682 	i40e_shutdown_arq(hw);
683 init_adminq_free_asq:
684 	i40e_shutdown_asq(hw);
685 init_adminq_destroy_locks:
686 
687 init_adminq_exit:
688 	return ret_code;
689 }
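
/* Illustrative caller sketch (not part of this file): the PF driver sizes
 * the queues before calling in, roughly
 *
 *	hw->aq.num_arq_entries = I40E_AQ_LEN;
 *	hw->aq.num_asq_entries = I40E_AQ_LEN;
 *	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *	err = i40e_init_adminq(hw);
 *
 * where I40E_AQ_LEN and I40E_MAX_AQ_BUF_SIZE are the PF driver's defaults.
 */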
690 
691 /**
692  *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
693  *  @hw: pointer to the hardware structure
694  **/
695 void i40e_shutdown_adminq(struct i40e_hw *hw)
696 {
697 	if (i40e_check_asq_alive(hw))
698 		i40e_aq_queue_shutdown(hw, true);
699 
700 	i40e_shutdown_asq(hw);
701 	i40e_shutdown_arq(hw);
702 
703 	if (hw->nvm_buff.va)
704 		i40e_free_virt_mem(hw, &hw->nvm_buff);
705 }
706 
707 /**
708  *  i40e_clean_asq - cleans Admin send queue
709  *  @hw: pointer to the hardware structure
710  *
711  *  returns the number of free desc
712  **/
713 static u16 i40e_clean_asq(struct i40e_hw *hw)
714 {
715 	struct i40e_adminq_ring *asq = &(hw->aq.asq);
716 	struct i40e_asq_cmd_details *details;
717 	u16 ntc = asq->next_to_clean;
718 	struct i40e_aq_desc desc_cb;
719 	struct i40e_aq_desc *desc;
720 
721 	desc = I40E_ADMINQ_DESC(*asq, ntc);
722 	details = I40E_ADMINQ_DETAILS(*asq, ntc);
723 	while (rd32(hw, hw->aq.asq.head) != ntc) {
724 		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
725 			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
726 
727 		if (details->callback) {
728 			I40E_ADMINQ_CALLBACK cb_func =
729 					(I40E_ADMINQ_CALLBACK)details->callback;
730 			desc_cb = *desc;
731 			cb_func(hw, &desc_cb);
732 		}
733 		memset(desc, 0, sizeof(*desc));
734 		memset(details, 0, sizeof(*details));
735 		ntc++;
736 		if (ntc == asq->count)
737 			ntc = 0;
738 		desc = I40E_ADMINQ_DESC(*asq, ntc);
739 		details = I40E_ADMINQ_DETAILS(*asq, ntc);
740 	}
741 
742 	asq->next_to_clean = ntc;
743 
744 	return I40E_DESC_UNUSED(asq);
745 }
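
/* I40E_DESC_UNUSED() reports how many send descriptors are still free for
 * the given next_to_use/next_to_clean positions; the send path treats a
 * return value of zero as "queue full" and refuses to post a new command.
 */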
746 
747 /**
748  *  i40e_asq_done - check if FW has processed the Admin Send Queue
749  *  @hw: pointer to the hw struct
750  *
751  *  Returns true if the firmware has processed all descriptors on the
752  *  admin send queue. Returns false if there are still requests pending.
753  **/
754 static bool i40e_asq_done(struct i40e_hw *hw)
755 {
756 	/* AQ designers suggest use of head for better
757 	 * timing reliability than DD bit
758 	 */
759 	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
760 
761 }
762 
763 /**
764  *  i40e_asq_send_command_atomic_exec - send command to Admin Queue
765  *  @hw: pointer to the hw struct
766  *  @desc: prefilled descriptor describing the command (non DMA mem)
767  *  @buff: buffer to use for indirect commands
768  *  @buff_size: size of buffer for indirect commands
769  *  @cmd_details: pointer to command details structure
770  *  @is_atomic_context: is the function called in an atomic context?
771  *
772  *  This is the main send command routine for the Admin Send Queue.  It
773  *  posts the command, cleans the queue, waits for writeback, etc.
774  **/
775 static int
776 i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
777 				  struct i40e_aq_desc *desc,
778 				  void *buff, /* can be NULL */
779 				  u16  buff_size,
780 				  struct i40e_asq_cmd_details *cmd_details,
781 				  bool is_atomic_context)
782 {
783 	struct i40e_dma_mem *dma_buff = NULL;
784 	struct i40e_asq_cmd_details *details;
785 	struct i40e_aq_desc *desc_on_ring;
786 	bool cmd_completed = false;
787 	u16  retval = 0;
788 	int status = 0;
789 	u32  val = 0;
790 
791 	if (hw->aq.asq.count == 0) {
792 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
793 			   "AQTX: Admin queue not initialized.\n");
794 		status = -EIO;
795 		goto asq_send_command_error;
796 	}
797 
798 	hw->aq.asq_last_status = I40E_AQ_RC_OK;
799 
800 	val = rd32(hw, hw->aq.asq.head);
801 	if (val >= hw->aq.num_asq_entries) {
802 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
803 			   "AQTX: head overrun at %d\n", val);
804 		status = -ENOSPC;
805 		goto asq_send_command_error;
806 	}
807 
808 	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
809 	if (cmd_details) {
810 		*details = *cmd_details;
811 
812 		/* If the cmd_details are defined, copy the cookie.  The
813 		 * cpu_to_le32 is not strictly needed here because the data
814 		 * is ignored by the FW and only used by the driver
815 		 */
816 		if (details->cookie) {
817 			desc->cookie_high =
818 				cpu_to_le32(upper_32_bits(details->cookie));
819 			desc->cookie_low =
820 				cpu_to_le32(lower_32_bits(details->cookie));
821 		}
822 	} else {
823 		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
824 	}
825 
826 	/* clear requested flags and then set additional flags if defined */
827 	desc->flags &= ~cpu_to_le16(details->flags_dis);
828 	desc->flags |= cpu_to_le16(details->flags_ena);
829 
830 	if (buff_size > hw->aq.asq_buf_size) {
831 		i40e_debug(hw,
832 			   I40E_DEBUG_AQ_MESSAGE,
833 			   "AQTX: Invalid buffer size: %d.\n",
834 			   buff_size);
835 		status = -EINVAL;
836 		goto asq_send_command_error;
837 	}
838 
839 	if (details->postpone && !details->async) {
840 		i40e_debug(hw,
841 			   I40E_DEBUG_AQ_MESSAGE,
842 			   "AQTX: Async flag not set along with postpone flag");
843 		status = -EINVAL;
844 		goto asq_send_command_error;
845 	}
846 
847 	/* Call the clean function to reclaim the descriptors that were
848 	 * processed by FW; it returns the number of descriptors still
849 	 * available.
850 	 */
851 	/* In case of asynchronous completions, the clean function called
852 	 * here could also run in a separate thread.
853 	 */
854 	if (i40e_clean_asq(hw) == 0) {
855 		i40e_debug(hw,
856 			   I40E_DEBUG_AQ_MESSAGE,
857 			   "AQTX: Error queue is full.\n");
858 		status = -ENOSPC;
859 		goto asq_send_command_error;
860 	}
861 
862 	/* initialize the temp desc pointer with the right desc */
863 	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
864 
865 	/* if the desc is available copy the temp desc to the right place */
866 	*desc_on_ring = *desc;
867 
868 	/* if buff is not NULL assume indirect command */
869 	if (buff != NULL) {
870 		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
871 		/* copy the user buff into the respective DMA buff */
872 		memcpy(dma_buff->va, buff, buff_size);
873 		desc_on_ring->datalen = cpu_to_le16(buff_size);
874 
875 		/* Update the address values in the desc with the pa value
876 		 * for respective buffer
877 		 */
878 		desc_on_ring->params.external.addr_high =
879 				cpu_to_le32(upper_32_bits(dma_buff->pa));
880 		desc_on_ring->params.external.addr_low =
881 				cpu_to_le32(lower_32_bits(dma_buff->pa));
882 	}
883 
884 	/* bump the tail */
885 	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
886 	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
887 		      buff, buff_size);
888 	(hw->aq.asq.next_to_use)++;
889 	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
890 		hw->aq.asq.next_to_use = 0;
891 	if (!details->postpone)
892 		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
893 
894 	/* unless the command is marked async or postponed,
895 	 * we need to wait for the desc write back
896 	 */
897 	if (!details->async && !details->postpone) {
898 		u32 total_delay = 0;
899 
900 		do {
901 			/* AQ designers suggest use of head for better
902 			 * timing reliability than DD bit
903 			 */
904 			if (i40e_asq_done(hw))
905 				break;
906 
907 			if (is_atomic_context)
908 				udelay(50);
909 			else
910 				usleep_range(40, 60);
911 
912 			total_delay += 50;
913 		} while (total_delay < hw->aq.asq_cmd_timeout);
914 	}
915 
916 	/* if ready, copy the desc back to temp */
917 	if (i40e_asq_done(hw)) {
918 		*desc = *desc_on_ring;
919 		if (buff != NULL)
920 			memcpy(buff, dma_buff->va, buff_size);
921 		retval = le16_to_cpu(desc->retval);
922 		if (retval != 0) {
923 			i40e_debug(hw,
924 				   I40E_DEBUG_AQ_MESSAGE,
925 				   "AQTX: Command completed with error 0x%X.\n",
926 				   retval);
927 
928 			/* strip off FW internal code */
929 			retval &= 0xff;
930 		}
931 		cmd_completed = true;
932 		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
933 			status = 0;
934 		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
935 			status = -EBUSY;
936 		else
937 			status = -EIO;
938 		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
939 	}
940 
941 	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
942 		   "AQTX: desc and buffer writeback:\n");
943 	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
944 
945 	/* save writeback aq if requested */
946 	if (details->wb_desc)
947 		*details->wb_desc = *desc_on_ring;
948 
949 	/* update the error if time out occurred */
950 	if ((!cmd_completed) &&
951 	    (!details->async && !details->postpone)) {
952 		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
953 			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
954 				   "AQTX: AQ Critical error.\n");
955 			status = -EIO;
956 		} else {
957 			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
958 				   "AQTX: Writeback timeout.\n");
959 			status = -EIO;
960 		}
961 	}
962 
963 asq_send_command_error:
964 	return status;
965 }
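
/* The wait loop above advances total_delay in ~50 usec steps, so
 * hw->aq.asq_cmd_timeout (initialized from I40E_ASQ_CMD_TIMEOUT in
 * i40e_init_adminq()) effectively bounds, in microseconds, how long a
 * synchronous command may take before it is reported as timed out.
 */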
966 
967 /**
968  *  i40e_asq_send_command_atomic - send command to Admin Queue
969  *  @hw: pointer to the hw struct
970  *  @desc: prefilled descriptor describing the command (non DMA mem)
971  *  @buff: buffer to use for indirect commands
972  *  @buff_size: size of buffer for indirect commands
973  *  @cmd_details: pointer to command details structure
974  *  @is_atomic_context: is the function called in an atomic context?
975  *
976  *  Acquires the lock and calls the main send command execution
977  *  routine.
978  **/
979 int
980 i40e_asq_send_command_atomic(struct i40e_hw *hw,
981 			     struct i40e_aq_desc *desc,
982 			     void *buff, /* can be NULL */
983 			     u16  buff_size,
984 			     struct i40e_asq_cmd_details *cmd_details,
985 			     bool is_atomic_context)
986 {
987 	int status;
988 
989 	mutex_lock(&hw->aq.asq_mutex);
990 	status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
991 						   cmd_details,
992 						   is_atomic_context);
993 
994 	mutex_unlock(&hw->aq.asq_mutex);
995 	return status;
996 }
997 
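/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  Convenience wrapper around i40e_asq_send_command_atomic() for callers
 *  that are not in atomic context.
 **/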
998 int
999 i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
1000 		      void *buff, /* can be NULL */ u16  buff_size,
1001 		      struct i40e_asq_cmd_details *cmd_details)
1002 {
1003 	return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
1004 					    cmd_details, false);
1005 }
1006 
1007 /**
1008  *  i40e_asq_send_command_atomic_v2 - send command to Admin Queue
1009  *  @hw: pointer to the hw struct
1010  *  @desc: prefilled descriptor describing the command (non DMA mem)
1011  *  @buff: buffer to use for indirect commands
1012  *  @buff_size: size of buffer for indirect commands
1013  *  @cmd_details: pointer to command details structure
1014  *  @is_atomic_context: is the function called in an atomic context?
1015  *  @aq_status: pointer to Admin Queue status return value
1016  *
1017  *  Acquires the lock and calls the main send command execution
1018  *  routine. Returns the last Admin Queue status in aq_status
1019  *  to avoid race conditions in access to hw->aq.asq_last_status.
1020  **/
1021 int
1022 i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
1023 				struct i40e_aq_desc *desc,
1024 				void *buff, /* can be NULL */
1025 				u16  buff_size,
1026 				struct i40e_asq_cmd_details *cmd_details,
1027 				bool is_atomic_context,
1028 				enum i40e_admin_queue_err *aq_status)
1029 {
1030 	int status;
1031 
1032 	mutex_lock(&hw->aq.asq_mutex);
1033 	status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
1034 						   buff_size,
1035 						   cmd_details,
1036 						   is_atomic_context);
1037 	if (aq_status)
1038 		*aq_status = hw->aq.asq_last_status;
1039 	mutex_unlock(&hw->aq.asq_mutex);
1040 	return status;
1041 }
1042 
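/**
 *  i40e_asq_send_command_v2 - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @aq_status: pointer to Admin Queue status return value
 *
 *  Wrapper around i40e_asq_send_command_atomic_v2() that also reports the
 *  last Admin Queue status through @aq_status.
 **/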
1043 int
1044 i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
1045 			 void *buff, /* can be NULL */ u16  buff_size,
1046 			 struct i40e_asq_cmd_details *cmd_details,
1047 			 enum i40e_admin_queue_err *aq_status)
1048 {
1049 	return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
1050 					       cmd_details, true, aq_status);
1051 }
1052 
1053 /**
1054  *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
1055  *  @desc:     pointer to the temp descriptor (non DMA mem)
1056  *  @opcode:   the opcode can be used to decide which flags to turn off or on
1057  *
1058  *  Fill the desc with default values
1059  **/
1060 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
1061 				       u16 opcode)
1062 {
1063 	/* zero out the desc */
1064 	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
1065 	desc->opcode = cpu_to_le16(opcode);
1066 	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
1067 }
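
/* Typical direct-command usage (illustrative sketch, not part of this
 * file): fill a stack descriptor and hand it to the send path, e.g.
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * Indirect commands additionally pass a buffer and set the BUF/RD flags.
 */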
1068 
1069 /**
1070  *  i40e_clean_arq_element
1071  *  @hw: pointer to the hw struct
1072  *  @e: event info from the receive descriptor, includes any buffers
1073  *  @pending: number of events that could be left to process
1074  *
1075  *  This function cleans one Admin Receive Queue element and returns
1076  *  the contents through e.  It can also return how many events are
1077  *  left to process through 'pending'
1078  **/
1079 int i40e_clean_arq_element(struct i40e_hw *hw,
1080 			   struct i40e_arq_event_info *e,
1081 			   u16 *pending)
1082 {
1083 	u16 ntc = hw->aq.arq.next_to_clean;
1084 	struct i40e_aq_desc *desc;
1085 	struct i40e_dma_mem *bi;
1086 	int ret_code = 0;
1087 	u16 desc_idx;
1088 	u16 datalen;
1089 	u16 flags;
1090 	u16 ntu;
1091 
1092 	/* pre-clean the event info */
1093 	memset(&e->desc, 0, sizeof(e->desc));
1094 
1095 	/* take the lock before we start messing with the ring */
1096 	mutex_lock(&hw->aq.arq_mutex);
1097 
1098 	if (hw->aq.arq.count == 0) {
1099 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1100 			   "AQRX: Admin queue not initialized.\n");
1101 		ret_code = -EIO;
1102 		goto clean_arq_element_err;
1103 	}
1104 
1105 	/* set next_to_use to head */
1106 	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
1107 	if (ntu == ntc) {
1108 		/* nothing to do - shouldn't need to update ring's values */
1109 		ret_code = -EALREADY;
1110 		goto clean_arq_element_out;
1111 	}
1112 
1113 	/* now clean the next descriptor */
1114 	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
1115 	desc_idx = ntc;
1116 
1117 	hw->aq.arq_last_status =
1118 		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
1119 	flags = le16_to_cpu(desc->flags);
1120 	if (flags & I40E_AQ_FLAG_ERR) {
1121 		ret_code = -EIO;
1122 		i40e_debug(hw,
1123 			   I40E_DEBUG_AQ_MESSAGE,
1124 			   "AQRX: Event received with error 0x%X.\n",
1125 			   hw->aq.arq_last_status);
1126 	}
1127 
1128 	e->desc = *desc;
1129 	datalen = le16_to_cpu(desc->datalen);
1130 	e->msg_len = min(datalen, e->buf_len);
1131 	if (e->msg_buf != NULL && (e->msg_len != 0))
1132 		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
1133 		       e->msg_len);
1134 
1135 	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
1136 	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
1137 		      hw->aq.arq_buf_size);
1138 
1139 	/* Restore the original datalen and buffer address in the desc;
1140 	 * FW updates datalen to indicate the event message
1141 	 * size
1142 	 */
1143 	bi = &hw->aq.arq.r.arq_bi[ntc];
1144 	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
1145 
1146 	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
1147 	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
1148 		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
1149 	desc->datalen = cpu_to_le16((u16)bi->size);
1150 	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
1151 	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
1152 
1153 	/* set tail = the last cleaned desc index. */
1154 	wr32(hw, hw->aq.arq.tail, ntc);
1155 	/* ntc is updated to tail + 1 */
1156 	ntc++;
1157 	if (ntc == hw->aq.num_arq_entries)
1158 		ntc = 0;
1159 	hw->aq.arq.next_to_clean = ntc;
1160 	hw->aq.arq.next_to_use = ntu;
1161 
1162 	i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
1163 clean_arq_element_out:
1164 	/* Set pending if needed, unlock and return */
1165 	if (pending)
1166 		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
1167 clean_arq_element_err:
1168 	mutex_unlock(&hw->aq.arq_mutex);
1169 
1170 	return ret_code;
1171 }
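
/* Illustrative consumer sketch (the PF service task does roughly this):
 *
 *	struct i40e_arq_event_info event = {};
 *	u16 pending;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	do {
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;	(an -EALREADY return means nothing left)
 *		... dispatch on le16_to_cpu(event.desc.opcode) ...
 *	} while (pending);
 *	kfree(event.msg_buf);
 */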
1172 
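/**
 *  i40e_resume_aq - restore AdminQ register state after a reset
 *  @hw: pointer to the hardware structure
 *
 *  Re-programs the ASQ and ARQ base/length registers from the already
 *  allocated rings; used by i40e_init_adminq() when the initial firmware
 *  version query times out.
 **/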
1173 static void i40e_resume_aq(struct i40e_hw *hw)
1174 {
1175 	/* Registers are reset after PF reset */
1176 	hw->aq.asq.next_to_use = 0;
1177 	hw->aq.asq.next_to_clean = 0;
1178 
1179 	i40e_config_asq_regs(hw);
1180 
1181 	hw->aq.arq.next_to_use = 0;
1182 	hw->aq.arq.next_to_clean = 0;
1183 
1184 	i40e_config_arq_regs(hw);
1185 }
1186