1 /******************************************************************************
2
3 Copyright (c) 2013-2018, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33
34 #include "i40e_status.h"
35 #include "i40e_type.h"
36 #include "i40e_register.h"
37 #include "i40e_adminq.h"
38 #include "i40e_prototype.h"
39
40 /**
41 * i40e_adminq_init_regs - Initialize AdminQ registers
42 * @hw: pointer to the hardware structure
43 *
44 * This assumes the alloc_asq and alloc_arq functions have already been called
45 **/
46 static void i40e_adminq_init_regs(struct i40e_hw *hw)
47 {
48 /* set head and tail registers in our local struct */
49 if (i40e_is_vf(hw)) {
50 hw->aq.asq.tail = I40E_VF_ATQT1;
51 hw->aq.asq.head = I40E_VF_ATQH1;
52 hw->aq.asq.len = I40E_VF_ATQLEN1;
53 hw->aq.asq.bal = I40E_VF_ATQBAL1;
54 hw->aq.asq.bah = I40E_VF_ATQBAH1;
55 hw->aq.arq.tail = I40E_VF_ARQT1;
56 hw->aq.arq.head = I40E_VF_ARQH1;
57 hw->aq.arq.len = I40E_VF_ARQLEN1;
58 hw->aq.arq.bal = I40E_VF_ARQBAL1;
59 hw->aq.arq.bah = I40E_VF_ARQBAH1;
60 } else {
61 hw->aq.asq.tail = I40E_PF_ATQT;
62 hw->aq.asq.head = I40E_PF_ATQH;
63 hw->aq.asq.len = I40E_PF_ATQLEN;
64 hw->aq.asq.bal = I40E_PF_ATQBAL;
65 hw->aq.asq.bah = I40E_PF_ATQBAH;
66 hw->aq.arq.tail = I40E_PF_ARQT;
67 hw->aq.arq.head = I40E_PF_ARQH;
68 hw->aq.arq.len = I40E_PF_ARQLEN;
69 hw->aq.arq.bal = I40E_PF_ARQBAL;
70 hw->aq.arq.bah = I40E_PF_ARQBAH;
71 }
72 }
73
74 /**
75 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
76 * @hw: pointer to the hardware structure
77 **/
78 enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
79 {
80 enum i40e_status_code ret_code;
81
82 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
83 i40e_mem_atq_ring,
84 (hw->aq.num_asq_entries *
85 sizeof(struct i40e_aq_desc)),
86 I40E_ADMINQ_DESC_ALIGNMENT);
87 if (ret_code)
88 return ret_code;
89
90 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
91 (hw->aq.num_asq_entries *
92 sizeof(struct i40e_asq_cmd_details)));
93 if (ret_code) {
94 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
95 return ret_code;
96 }
97
98 return ret_code;
99 }
100
101 /**
102 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
103 * @hw: pointer to the hardware structure
104 **/
105 enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
106 {
107 enum i40e_status_code ret_code;
108
109 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
110 i40e_mem_arq_ring,
111 (hw->aq.num_arq_entries *
112 sizeof(struct i40e_aq_desc)),
113 I40E_ADMINQ_DESC_ALIGNMENT);
114
115 return ret_code;
116 }
117
118 /**
119 * i40e_free_adminq_asq - Free Admin Queue send rings
120 * @hw: pointer to the hardware structure
121 *
122 * This assumes the posted send buffers have already been cleaned
123 * and de-allocated
124 **/
125 void i40e_free_adminq_asq(struct i40e_hw *hw)
126 {
127 i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
128 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
129 }
130
131 /**
132 * i40e_free_adminq_arq - Free Admin Queue receive rings
133 * @hw: pointer to the hardware structure
134 *
135 * This assumes the posted receive buffers have already been cleaned
136 * and de-allocated
137 **/
138 void i40e_free_adminq_arq(struct i40e_hw *hw)
139 {
140 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
141 }
142
143 /**
144 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
145 * @hw: pointer to the hardware structure
146 **/
147 static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
148 {
149 enum i40e_status_code ret_code;
150 struct i40e_aq_desc *desc;
151 struct i40e_dma_mem *bi;
152 int i;
153
154 /* We'll be allocating the buffer info memory first, then we can
155 * allocate the mapped buffers for the event processing
156 */
157
158 /* buffer_info structures do not need alignment */
159 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
160 (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
161 if (ret_code)
162 goto alloc_arq_bufs;
163 hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
164
165 /* allocate the mapped buffers */
166 for (i = 0; i < hw->aq.num_arq_entries; i++) {
167 bi = &hw->aq.arq.r.arq_bi[i];
168 ret_code = i40e_allocate_dma_mem(hw, bi,
169 i40e_mem_arq_buf,
170 hw->aq.arq_buf_size,
171 I40E_ADMINQ_DESC_ALIGNMENT);
172 if (ret_code)
173 goto unwind_alloc_arq_bufs;
174
175 /* now configure the descriptors for use */
176 desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
177
178 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
179 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
180 desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
181 desc->opcode = 0;
182 /* This is in accordance with Admin queue design; there is no
183 * register for buffer size configuration
184 */
185 desc->datalen = CPU_TO_LE16((u16)bi->size);
186 desc->retval = 0;
187 desc->cookie_high = 0;
188 desc->cookie_low = 0;
189 desc->params.external.addr_high =
190 CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
191 desc->params.external.addr_low =
192 CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
193 desc->params.external.param0 = 0;
194 desc->params.external.param1 = 0;
195 }
196
197 alloc_arq_bufs:
198 return ret_code;
199
200 unwind_alloc_arq_bufs:
201 /* don't try to free the one that failed... */
202 i--;
203 for (; i >= 0; i--)
204 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
205 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
206
207 return ret_code;
208 }
209
210 /**
211 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
212 * @hw: pointer to the hardware structure
213 **/
214 static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
215 {
216 enum i40e_status_code ret_code;
217 struct i40e_dma_mem *bi;
218 int i;
219
220 /* No mapped memory needed yet, just the buffer info structures */
221 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
222 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
223 if (ret_code)
224 goto alloc_asq_bufs;
225 hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
226
227 /* allocate the mapped buffers */
228 for (i = 0; i < hw->aq.num_asq_entries; i++) {
229 bi = &hw->aq.asq.r.asq_bi[i];
230 ret_code = i40e_allocate_dma_mem(hw, bi,
231 i40e_mem_asq_buf,
232 hw->aq.asq_buf_size,
233 I40E_ADMINQ_DESC_ALIGNMENT);
234 if (ret_code)
235 goto unwind_alloc_asq_bufs;
236 }
237 alloc_asq_bufs:
238 return ret_code;
239
240 unwind_alloc_asq_bufs:
241 /* don't try to free the one that failed... */
242 i--;
243 for (; i >= 0; i--)
244 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
245 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
246
247 return ret_code;
248 }
249
250 /**
251 * i40e_free_arq_bufs - Free receive queue buffer info elements
252 * @hw: pointer to the hardware structure
253 **/
254 static void i40e_free_arq_bufs(struct i40e_hw *hw)
255 {
256 int i;
257
258 /* free descriptors */
259 for (i = 0; i < hw->aq.num_arq_entries; i++)
260 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
261
262 /* free the descriptor memory */
263 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
264
265 /* free the dma header */
266 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
267 }
268
269 /**
270 * i40e_free_asq_bufs - Free send queue buffer info elements
271 * @hw: pointer to the hardware structure
272 **/
273 static void i40e_free_asq_bufs(struct i40e_hw *hw)
274 {
275 int i;
276
277 /* only unmap if the address is non-NULL */
278 for (i = 0; i < hw->aq.num_asq_entries; i++)
279 if (hw->aq.asq.r.asq_bi[i].pa)
280 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
281
282 /* free the buffer info list */
283 i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
284
285 /* free the descriptor memory */
286 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
287
288 /* free the dma header */
289 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
290 }
291
292 /**
293 * i40e_config_asq_regs - configure ASQ registers
294 * @hw: pointer to the hardware structure
295 *
296 * Configure base address and length registers for the transmit queue
297 **/
298 static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
299 {
300 enum i40e_status_code ret_code = I40E_SUCCESS;
301 u32 reg = 0;
302
303 /* Clear Head and Tail */
304 wr32(hw, hw->aq.asq.head, 0);
305 wr32(hw, hw->aq.asq.tail, 0);
306
307 /* set starting point */
308 if (!i40e_is_vf(hw))
309 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
310 I40E_PF_ATQLEN_ATQENABLE_MASK));
311 if (i40e_is_vf(hw))
312 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
313 I40E_VF_ATQLEN1_ATQENABLE_MASK));
314 wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
315 wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
316
317 /* Check one register to verify that config was applied */
318 reg = rd32(hw, hw->aq.asq.bal);
319 if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
320 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
321
322 return ret_code;
323 }
324
325 /**
326 * i40e_config_arq_regs - ARQ register configuration
327 * @hw: pointer to the hardware structure
328 *
329 * Configure base address and length registers for the receive (event) queue
330 **/
331 static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
332 {
333 enum i40e_status_code ret_code = I40E_SUCCESS;
334 u32 reg = 0;
335
336 /* Clear Head and Tail */
337 wr32(hw, hw->aq.arq.head, 0);
338 wr32(hw, hw->aq.arq.tail, 0);
339
340 /* set starting point */
341 if (!i40e_is_vf(hw))
342 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
343 I40E_PF_ARQLEN_ARQENABLE_MASK));
344 if (i40e_is_vf(hw))
345 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
346 I40E_VF_ARQLEN1_ARQENABLE_MASK));
347 wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
348 wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
349
350 /* Update tail in the HW to post pre-allocated buffers */
351 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
352
353 /* Check one register to verify that config was applied */
354 reg = rd32(hw, hw->aq.arq.bal);
355 if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
356 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
357
358 return ret_code;
359 }
360
361 /**
362 * i40e_init_asq - main initialization routine for ASQ
363 * @hw: pointer to the hardware structure
364 *
365 * This is the main initialization routine for the Admin Send Queue
366 * Prior to calling this function, drivers *MUST* set the following fields
367 * in the hw->aq structure:
368 * - hw->aq.num_asq_entries
369 * - hw->aq.asq_buf_size
370 *
371 * Do *NOT* hold the lock when calling this as the memory allocation routines
372 * called are not going to be atomic context safe
373 **/
374 enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
375 {
376 enum i40e_status_code ret_code = I40E_SUCCESS;
377
378 if (hw->aq.asq.count > 0) {
379 /* queue already initialized */
380 ret_code = I40E_ERR_NOT_READY;
381 goto init_adminq_exit;
382 }
383
384 /* verify input for valid configuration */
385 if ((hw->aq.num_asq_entries == 0) ||
386 (hw->aq.asq_buf_size == 0)) {
387 ret_code = I40E_ERR_CONFIG;
388 goto init_adminq_exit;
389 }
390
391 hw->aq.asq.next_to_use = 0;
392 hw->aq.asq.next_to_clean = 0;
393
394 /* allocate the ring memory */
395 ret_code = i40e_alloc_adminq_asq_ring(hw);
396 if (ret_code != I40E_SUCCESS)
397 goto init_adminq_exit;
398
399 /* allocate buffers in the rings */
400 ret_code = i40e_alloc_asq_bufs(hw);
401 if (ret_code != I40E_SUCCESS)
402 goto init_adminq_free_rings;
403
404 /* initialize base registers */
405 ret_code = i40e_config_asq_regs(hw);
406 if (ret_code != I40E_SUCCESS)
407 goto init_config_regs;
408
409 /* success! */
410 hw->aq.asq.count = hw->aq.num_asq_entries;
411 goto init_adminq_exit;
412
413 init_adminq_free_rings:
414 i40e_free_adminq_asq(hw);
415 return ret_code;
416
417 init_config_regs:
418 i40e_free_asq_bufs(hw);
419
420 init_adminq_exit:
421 return ret_code;
422 }
423
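/* Note: i40e_init_adminq() further below drives this routine together with
 * i40e_init_arq(), bringing up both queues as a pair and unwinding them on
 * failure, so most callers go through that entry point instead.
 */
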
424 /**
425 * i40e_init_arq - initialize ARQ
426 * @hw: pointer to the hardware structure
427 *
428 * The main initialization routine for the Admin Receive (Event) Queue.
429 * Prior to calling this function, drivers *MUST* set the following fields
430 * in the hw->aq structure:
431 * - hw->aq.num_arq_entries
432 * - hw->aq.arq_buf_size
433 *
434 * Do *NOT* hold the lock when calling this as the memory allocation routines
435 * called are not going to be atomic context safe
436 **/
437 enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
438 {
439 enum i40e_status_code ret_code = I40E_SUCCESS;
440
441 if (hw->aq.arq.count > 0) {
442 /* queue already initialized */
443 ret_code = I40E_ERR_NOT_READY;
444 goto init_adminq_exit;
445 }
446
447 /* verify input for valid configuration */
448 if ((hw->aq.num_arq_entries == 0) ||
449 (hw->aq.arq_buf_size == 0)) {
450 ret_code = I40E_ERR_CONFIG;
451 goto init_adminq_exit;
452 }
453
454 hw->aq.arq.next_to_use = 0;
455 hw->aq.arq.next_to_clean = 0;
456
457 /* allocate the ring memory */
458 ret_code = i40e_alloc_adminq_arq_ring(hw);
459 if (ret_code != I40E_SUCCESS)
460 goto init_adminq_exit;
461
462 /* allocate buffers in the rings */
463 ret_code = i40e_alloc_arq_bufs(hw);
464 if (ret_code != I40E_SUCCESS)
465 goto init_adminq_free_rings;
466
467 /* initialize base registers */
468 ret_code = i40e_config_arq_regs(hw);
469 if (ret_code != I40E_SUCCESS)
470 goto init_adminq_free_rings;
471
472 /* success! */
473 hw->aq.arq.count = hw->aq.num_arq_entries;
474 goto init_adminq_exit;
475
476 init_adminq_free_rings:
477 i40e_free_adminq_arq(hw);
478
479 init_adminq_exit:
480 return ret_code;
481 }
482
483 /**
484 * i40e_shutdown_asq - shutdown the ASQ
485 * @hw: pointer to the hardware structure
486 *
487 * The main shutdown routine for the Admin Send Queue
488 **/
489 enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
490 {
491 enum i40e_status_code ret_code = I40E_SUCCESS;
492
493 i40e_acquire_spinlock(&hw->aq.asq_spinlock);
494
495 if (hw->aq.asq.count == 0) {
496 ret_code = I40E_ERR_NOT_READY;
497 goto shutdown_asq_out;
498 }
499
500 /* Stop firmware AdminQ processing */
501 wr32(hw, hw->aq.asq.head, 0);
502 wr32(hw, hw->aq.asq.tail, 0);
503 wr32(hw, hw->aq.asq.len, 0);
504 wr32(hw, hw->aq.asq.bal, 0);
505 wr32(hw, hw->aq.asq.bah, 0);
506
507 hw->aq.asq.count = 0; /* to indicate uninitialized queue */
508
509 /* free ring buffers */
510 i40e_free_asq_bufs(hw);
511
512 shutdown_asq_out:
513 i40e_release_spinlock(&hw->aq.asq_spinlock);
514 return ret_code;
515 }
516
517 /**
518 * i40e_shutdown_arq - shutdown ARQ
519 * @hw: pointer to the hardware structure
520 *
521 * The main shutdown routine for the Admin Receive Queue
522 **/
523 enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
524 {
525 enum i40e_status_code ret_code = I40E_SUCCESS;
526
527 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
528
529 if (hw->aq.arq.count == 0) {
530 ret_code = I40E_ERR_NOT_READY;
531 goto shutdown_arq_out;
532 }
533
534 /* Stop firmware AdminQ processing */
535 wr32(hw, hw->aq.arq.head, 0);
536 wr32(hw, hw->aq.arq.tail, 0);
537 wr32(hw, hw->aq.arq.len, 0);
538 wr32(hw, hw->aq.arq.bal, 0);
539 wr32(hw, hw->aq.arq.bah, 0);
540
541 hw->aq.arq.count = 0; /* to indicate uninitialized queue */
542
543 /* free ring buffers */
544 i40e_free_arq_bufs(hw);
545
546 shutdown_arq_out:
547 i40e_release_spinlock(&hw->aq.arq_spinlock);
548 return ret_code;
549 }
550
551 /**
552 * i40e_resume_aq - resume AQ processing from 0
553 * @hw: pointer to the hardware structure
554 **/
555 static void i40e_resume_aq(struct i40e_hw *hw)
556 {
557 /* Registers are reset after PF reset */
558 hw->aq.asq.next_to_use = 0;
559 hw->aq.asq.next_to_clean = 0;
560
561 i40e_config_asq_regs(hw);
562
563 hw->aq.arq.next_to_use = 0;
564 hw->aq.arq.next_to_clean = 0;
565
566 i40e_config_arq_regs(hw);
567 }
568
569 /**
570 * i40e_set_hw_flags - set HW flags
571 * @hw: pointer to the hardware structure
572 **/
573 static void i40e_set_hw_flags(struct i40e_hw *hw)
574 {
575 struct i40e_adminq_info *aq = &hw->aq;
576
577 hw->flags = 0;
578
579 switch (hw->mac.type) {
580 case I40E_MAC_XL710:
581 if (aq->api_maj_ver > 1 ||
582 (aq->api_maj_ver == 1 &&
583 aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
584 hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
585 hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
586 /* The ability to RX (not drop) 802.1ad frames */
587 hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
588 }
589 break;
590 case I40E_MAC_X722:
591 hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
592 I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
593
594 if (aq->api_maj_ver > 1 ||
595 (aq->api_maj_ver == 1 &&
596 aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
597 hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
598
599 if (aq->api_maj_ver > 1 ||
600 (aq->api_maj_ver == 1 &&
601 aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
602 hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
603
604 if (aq->api_maj_ver > 1 ||
605 (aq->api_maj_ver == 1 &&
606 aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
607 hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;
608
609 /* fall through */
610 default:
611 break;
612 }
613
614 /* Newer versions of firmware require lock when reading the NVM */
615 if (aq->api_maj_ver > 1 ||
616 (aq->api_maj_ver == 1 &&
617 aq->api_min_ver >= 5))
618 hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
619
620 if (aq->api_maj_ver > 1 ||
621 (aq->api_maj_ver == 1 &&
622 aq->api_min_ver >= 8)) {
623 hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
624 hw->flags |= I40E_HW_FLAG_DROP_MODE;
625 }
626
627 if (aq->api_maj_ver > 1 ||
628 (aq->api_maj_ver == 1 &&
629 aq->api_min_ver >= 9))
630 hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
631 }
632
633 /**
634 * i40e_init_adminq - main initialization routine for Admin Queue
635 * @hw: pointer to the hardware structure
636 *
637 * Prior to calling this function, drivers *MUST* set the following fields
638 * in the hw->aq structure:
639 * - hw->aq.num_asq_entries
640 * - hw->aq.num_arq_entries
641 * - hw->aq.arq_buf_size
642 * - hw->aq.asq_buf_size
643 **/
644 enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
645 {
646 struct i40e_adminq_info *aq = &hw->aq;
647 enum i40e_status_code ret_code;
648 u16 oem_hi = 0, oem_lo = 0;
649 u16 eetrack_hi = 0;
650 u16 eetrack_lo = 0;
651 u16 cfg_ptr = 0;
652 int retry = 0;
653
654 /* verify input for valid configuration */
655 if (aq->num_arq_entries == 0 ||
656 aq->num_asq_entries == 0 ||
657 aq->arq_buf_size == 0 ||
658 aq->asq_buf_size == 0) {
659 ret_code = I40E_ERR_CONFIG;
660 goto init_adminq_exit;
661 }
662 i40e_init_spinlock(&aq->asq_spinlock);
663 i40e_init_spinlock(&aq->arq_spinlock);
664
665 /* Set up register offsets */
666 i40e_adminq_init_regs(hw);
667
668 /* setup ASQ command write back timeout */
669 hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
670
671 /* allocate the ASQ */
672 ret_code = i40e_init_asq(hw);
673 if (ret_code != I40E_SUCCESS)
674 goto init_adminq_destroy_spinlocks;
675
676 /* allocate the ARQ */
677 ret_code = i40e_init_arq(hw);
678 if (ret_code != I40E_SUCCESS)
679 goto init_adminq_free_asq;
680
681 /* VF has no need of firmware */
682 if (i40e_is_vf(hw))
683 goto init_adminq_exit;
684 /* There are some cases where the firmware may not be quite ready
685 * for AdminQ operations, so we retry the AdminQ setup a few times
686 * if we see timeouts in this first AQ call.
687 */
688 do {
689 ret_code = i40e_aq_get_firmware_version(hw,
690 &aq->fw_maj_ver,
691 &aq->fw_min_ver,
692 &aq->fw_build,
693 &aq->api_maj_ver,
694 &aq->api_min_ver,
695 NULL);
696 if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
697 break;
698 retry++;
699 i40e_msec_delay(100);
700 i40e_resume_aq(hw);
701 } while (retry < 10);
702 if (ret_code != I40E_SUCCESS)
703 goto init_adminq_free_arq;
704
705 /*
706 * Some features were introduced in different FW API versions
707 * for different MAC types.
708 */
709 i40e_set_hw_flags(hw);
710
711 /* get the NVM version info */
712 i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
713 &hw->nvm.version);
714 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
715 i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
716 hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
717 i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
718 i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
719 &oem_hi);
720 i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
721 &oem_lo);
722 hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
723
724 if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
725 ret_code = I40E_ERR_FIRMWARE_API_VERSION;
726 goto init_adminq_free_arq;
727 }
728
729 /* pre-emptive resource lock release */
730 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
731 hw->nvm_release_on_done = FALSE;
732 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
733
734 ret_code = I40E_SUCCESS;
735
736 /* success! */
737 goto init_adminq_exit;
738
739 init_adminq_free_arq:
740 i40e_shutdown_arq(hw);
741 init_adminq_free_asq:
742 i40e_shutdown_asq(hw);
743 init_adminq_destroy_spinlocks:
744 i40e_destroy_spinlock(&aq->asq_spinlock);
745 i40e_destroy_spinlock(&aq->arq_spinlock);
746
747 init_adminq_exit:
748 return ret_code;
749 }
750
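/* Usage sketch (illustrative only, not taken from the driver): callers fill
 * in the four sizing fields listed in the comment above before calling
 * i40e_init_adminq(), and tear the queues down with i40e_shutdown_adminq().
 * The entry counts and buffer sizes shown here are arbitrary example values.
 *
 *        hw->aq.num_asq_entries = 128;
 *        hw->aq.num_arq_entries = 128;
 *        hw->aq.asq_buf_size = 4096;
 *        hw->aq.arq_buf_size = 4096;
 *
 *        if (i40e_init_adminq(hw) != I40E_SUCCESS)
 *                goto err;
 *        ...
 *        i40e_shutdown_adminq(hw);
 */
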
751 /**
752 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
753 * @hw: pointer to the hardware structure
754 **/
755 enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
756 {
757 enum i40e_status_code ret_code = I40E_SUCCESS;
758
759 if (i40e_check_asq_alive(hw))
760 i40e_aq_queue_shutdown(hw, TRUE);
761
762 i40e_shutdown_asq(hw);
763 i40e_shutdown_arq(hw);
764 i40e_destroy_spinlock(&hw->aq.asq_spinlock);
765 i40e_destroy_spinlock(&hw->aq.arq_spinlock);
766
767 if (hw->nvm_buff.va)
768 i40e_free_virt_mem(hw, &hw->nvm_buff);
769
770 return ret_code;
771 }
772
773 /**
774 * i40e_clean_asq - cleans Admin send queue
775 * @hw: pointer to the hardware structure
776 *
777 * returns the number of free desc
778 **/
779 u16 i40e_clean_asq(struct i40e_hw *hw)
780 {
781 struct i40e_adminq_ring *asq = &(hw->aq.asq);
782 struct i40e_asq_cmd_details *details;
783 u16 ntc = asq->next_to_clean;
784 struct i40e_aq_desc desc_cb;
785 struct i40e_aq_desc *desc;
786
787 desc = I40E_ADMINQ_DESC(*asq, ntc);
788 details = I40E_ADMINQ_DETAILS(*asq, ntc);
789 while (rd32(hw, hw->aq.asq.head) != ntc) {
790 i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
791 "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
792
793 if (details->callback) {
794 I40E_ADMINQ_CALLBACK cb_func =
795 (I40E_ADMINQ_CALLBACK)details->callback;
796 i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
797 I40E_DMA_TO_DMA);
798 cb_func(hw, &desc_cb);
799 }
800 i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
801 i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
802 ntc++;
803 if (ntc == asq->count)
804 ntc = 0;
805 desc = I40E_ADMINQ_DESC(*asq, ntc);
806 details = I40E_ADMINQ_DETAILS(*asq, ntc);
807 }
808
809 asq->next_to_clean = ntc;
810
811 return I40E_DESC_UNUSED(asq);
812 }
813
814 /**
815 * i40e_asq_done - check if FW has processed the Admin Send Queue
816 * @hw: pointer to the hw struct
817 *
818 * Returns TRUE if the firmware has processed all descriptors on the
819 * admin send queue. Returns FALSE if there are still requests pending.
820 **/
821 bool i40e_asq_done(struct i40e_hw *hw)
822 {
823 /* AQ designers suggest use of head for better
824 * timing reliability than DD bit
825 */
826 return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
827
828 }
829
830 /**
831 * i40e_asq_send_command - send command to Admin Queue
832 * @hw: pointer to the hw struct
833 * @desc: prefilled descriptor describing the command (non DMA mem)
834 * @buff: buffer to use for indirect commands
835 * @buff_size: size of buffer for indirect commands
836 * @cmd_details: pointer to command details structure
837 *
838 * This is the main send command driver routine for the Admin Queue send
839 * queue. It runs the queue, cleans the queue, etc
840 **/
841 enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
842 struct i40e_aq_desc *desc,
843 void *buff, /* can be NULL */
844 u16 buff_size,
845 struct i40e_asq_cmd_details *cmd_details)
846 {
847 enum i40e_status_code status = I40E_SUCCESS;
848 struct i40e_dma_mem *dma_buff = NULL;
849 struct i40e_asq_cmd_details *details;
850 struct i40e_aq_desc *desc_on_ring;
851 bool cmd_completed = FALSE;
852 u16 retval = 0;
853 u32 val = 0;
854
855 i40e_acquire_spinlock(&hw->aq.asq_spinlock);
856
857 hw->aq.asq_last_status = I40E_AQ_RC_OK;
858
859 if (hw->aq.asq.count == 0) {
860 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
861 "AQTX: Admin queue not initialized.\n");
862 status = I40E_ERR_QUEUE_EMPTY;
863 goto asq_send_command_error;
864 }
865
866 val = rd32(hw, hw->aq.asq.head);
867 if (val >= hw->aq.num_asq_entries) {
868 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
869 "AQTX: head overrun at %d\n", val);
870 status = I40E_ERR_ADMIN_QUEUE_FULL;
871 goto asq_send_command_error;
872 }
873
874 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
875 if (cmd_details) {
876 i40e_memcpy(details,
877 cmd_details,
878 sizeof(struct i40e_asq_cmd_details),
879 I40E_NONDMA_TO_NONDMA);
880
881 /* If the cmd_details are defined copy the cookie. The
882 * CPU_TO_LE32 is not needed here because the data is ignored
883 * by the FW, only used by the driver
884 */
885 if (details->cookie) {
886 desc->cookie_high =
887 CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
888 desc->cookie_low =
889 CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
890 }
891 } else {
892 i40e_memset(details, 0,
893 sizeof(struct i40e_asq_cmd_details),
894 I40E_NONDMA_MEM);
895 }
896
897 /* clear requested flags and then set additional flags if defined */
898 desc->flags &= ~CPU_TO_LE16(details->flags_dis);
899 desc->flags |= CPU_TO_LE16(details->flags_ena);
900
901 if (buff_size > hw->aq.asq_buf_size) {
902 i40e_debug(hw,
903 I40E_DEBUG_AQ_MESSAGE,
904 "AQTX: Invalid buffer size: %d.\n",
905 buff_size);
906 status = I40E_ERR_INVALID_SIZE;
907 goto asq_send_command_error;
908 }
909
910 if (details->postpone && !details->async) {
911 i40e_debug(hw,
912 I40E_DEBUG_AQ_MESSAGE,
913 "AQTX: Async flag not set along with postpone flag");
914 status = I40E_ERR_PARAM;
915 goto asq_send_command_error;
916 }
917
918 /* call clean and check queue available function to reclaim the
919 * descriptors that were processed by FW; the function returns the
920 * number of desc available
921 */
922 /* the clean function called here could be called in a separate thread
923 * in case of asynchronous completions
924 */
925 if (i40e_clean_asq(hw) == 0) {
926 i40e_debug(hw,
927 I40E_DEBUG_AQ_MESSAGE,
928 "AQTX: Error queue is full.\n");
929 status = I40E_ERR_ADMIN_QUEUE_FULL;
930 goto asq_send_command_error;
931 }
932
933 /* initialize the temp desc pointer with the right desc */
934 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
935
936 /* if the desc is available copy the temp desc to the right place */
937 i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
938 I40E_NONDMA_TO_DMA);
939
940 /* if buff is not NULL assume indirect command */
941 if (buff != NULL) {
942 dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
943 /* copy the user buff into the respective DMA buff */
944 i40e_memcpy(dma_buff->va, buff, buff_size,
945 I40E_NONDMA_TO_DMA);
946 desc_on_ring->datalen = CPU_TO_LE16(buff_size);
947
948 /* Update the address values in the desc with the pa value
949 * for respective buffer
950 */
951 desc_on_ring->params.external.addr_high =
952 CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
953 desc_on_ring->params.external.addr_low =
954 CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
955 }
956
957 /* bump the tail */
958 i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
959 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
960 buff, buff_size);
961 (hw->aq.asq.next_to_use)++;
962 if (hw->aq.asq.next_to_use == hw->aq.asq.count)
963 hw->aq.asq.next_to_use = 0;
964 if (!details->postpone)
965 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
966
967 /* if cmd_details are not defined or async flag is not set,
968 * we need to wait for desc write back
969 */
970 if (!details->async && !details->postpone) {
971 u32 total_delay = 0;
972
973 do {
974 /* AQ designers suggest use of head for better
975 * timing reliability than DD bit
976 */
977 if (i40e_asq_done(hw))
978 break;
979 i40e_usec_delay(50);
980 total_delay += 50;
981 } while (total_delay < hw->aq.asq_cmd_timeout);
982 }
983
984 /* if ready, copy the desc back to temp */
985 if (i40e_asq_done(hw)) {
986 i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
987 I40E_DMA_TO_NONDMA);
988 if (buff != NULL)
989 i40e_memcpy(buff, dma_buff->va, buff_size,
990 I40E_DMA_TO_NONDMA);
991 retval = LE16_TO_CPU(desc->retval);
992 if (retval != 0) {
993 i40e_debug(hw,
994 I40E_DEBUG_AQ_MESSAGE,
995 "AQTX: Command completed with error 0x%X.\n",
996 retval);
997
998 /* strip off FW internal code */
999 retval &= 0xff;
1000 }
1001 cmd_completed = TRUE;
1002 if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
1003 status = I40E_SUCCESS;
1004 else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
1005 status = I40E_ERR_NOT_READY;
1006 else
1007 status = I40E_ERR_ADMIN_QUEUE_ERROR;
1008 hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
1009 }
1010
1011 i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
1012 "AQTX: desc and buffer writeback:\n");
1013 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
1014
1015 /* save writeback aq if requested */
1016 if (details->wb_desc)
1017 i40e_memcpy(details->wb_desc, desc_on_ring,
1018 sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
1019
1020 /* update the error if time out occurred */
1021 if ((!cmd_completed) &&
1022 (!details->async && !details->postpone)) {
1023 if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
1024 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1025 "AQTX: AQ Critical error.\n");
1026 status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
1027 } else {
1028 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1029 "AQTX: Writeback timeout.\n");
1030 status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
1031 }
1032 }
1033
1034 asq_send_command_error:
1035 i40e_release_spinlock(&hw->aq.asq_spinlock);
1036 return status;
1037 }
1038
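/* Usage sketch (illustrative only, not taken from the driver): a direct
 * (buffer-less) command is typically built with
 * i40e_fill_default_direct_cmd_desc() below and then submitted here; the
 * opcode shown is a hypothetical placeholder.
 *
 *        struct i40e_aq_desc desc;
 *        enum i40e_status_code status;
 *
 *        i40e_fill_default_direct_cmd_desc(&desc, example_opcode);
 *        status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *        if (status != I40E_SUCCESS)
 *                ... inspect hw->aq.asq_last_status for the AQ return code ...
 */
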
1039 /**
1040 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
1041 * @desc: pointer to the temp descriptor (non DMA mem)
1042 * @opcode: the opcode can be used to decide which flags to turn off or on
1043 *
1044 * Fill the desc with default values
1045 **/
1046 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
1047 u16 opcode)
1048 {
1049 /* zero out the desc */
1050 i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
1051 I40E_NONDMA_MEM);
1052 desc->opcode = CPU_TO_LE16(opcode);
1053 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
1054 }
1055
1056 /**
1057 * i40e_clean_arq_element
1058 * @hw: pointer to the hw struct
1059 * @e: event info from the receive descriptor, includes any buffers
1060 * @pending: number of events that could be left to process
1061 *
1062 * This function cleans one Admin Receive Queue element and returns
1063 * the contents through e. It can also return how many events are
1064 * left to process through 'pending'
1065 **/
1066 enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
1067 struct i40e_arq_event_info *e,
1068 u16 *pending)
1069 {
1070 enum i40e_status_code ret_code = I40E_SUCCESS;
1071 u16 ntc = hw->aq.arq.next_to_clean;
1072 struct i40e_aq_desc *desc;
1073 struct i40e_dma_mem *bi;
1074 u16 desc_idx;
1075 u16 datalen;
1076 u16 flags;
1077 u16 ntu;
1078
1079 /* pre-clean the event info */
1080 i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
1081
1082 /* take the lock before we start messing with the ring */
1083 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
1084
1085 if (hw->aq.arq.count == 0) {
1086 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1087 "AQRX: Admin queue not initialized.\n");
1088 ret_code = I40E_ERR_QUEUE_EMPTY;
1089 goto clean_arq_element_err;
1090 }
1091
1092 /* set next_to_use to head */
1093 if (!i40e_is_vf(hw))
1094 ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
1095 else
1096 ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
1097 if (ntu == ntc) {
1098 /* nothing to do - shouldn't need to update ring's values */
1099 ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
1100 goto clean_arq_element_out;
1101 }
1102
1103 /* now clean the next descriptor */
1104 desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
1105 desc_idx = ntc;
1106
1107 hw->aq.arq_last_status =
1108 (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
1109 flags = LE16_TO_CPU(desc->flags);
1110 if (flags & I40E_AQ_FLAG_ERR) {
1111 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
1112 i40e_debug(hw,
1113 I40E_DEBUG_AQ_MESSAGE,
1114 "AQRX: Event received with error 0x%X.\n",
1115 hw->aq.arq_last_status);
1116 }
1117
1118 i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
1119 I40E_DMA_TO_NONDMA);
1120 datalen = LE16_TO_CPU(desc->datalen);
1121 e->msg_len = min(datalen, e->buf_len);
1122 if (e->msg_buf != NULL && (e->msg_len != 0))
1123 i40e_memcpy(e->msg_buf,
1124 hw->aq.arq.r.arq_bi[desc_idx].va,
1125 e->msg_len, I40E_DMA_TO_NONDMA);
1126
1127 i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
1128 i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
1129 hw->aq.arq_buf_size);
1130
1131 /* Restore the original datalen and buffer address in the desc,
1132 * FW updates datalen to indicate the event message
1133 * size
1134 */
1135 bi = &hw->aq.arq.r.arq_bi[ntc];
1136 i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
1137
1138 desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
1139 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
1140 desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
1141 desc->datalen = CPU_TO_LE16((u16)bi->size);
1142 desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
1143 desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
1144
1145 /* set tail = the last cleaned desc index. */
1146 wr32(hw, hw->aq.arq.tail, ntc);
1147 /* ntc is updated to tail + 1 */
1148 ntc++;
1149 if (ntc == hw->aq.num_arq_entries)
1150 ntc = 0;
1151 hw->aq.arq.next_to_clean = ntc;
1152 hw->aq.arq.next_to_use = ntu;
1153
1154 i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
1155 clean_arq_element_out:
1156 /* Set pending if needed, unlock and return */
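/* pending counts the descriptors remaining between next_to_clean and the
 * HW head (ntu), adding a full ring length when the head has wrapped
 * past ntc
 */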
1157 if (pending != NULL)
1158 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
1159 clean_arq_element_err:
1160 i40e_release_spinlock(&hw->aq.arq_spinlock);
1161
1162 return ret_code;
1163 }
1164
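/* Usage sketch (illustrative only, not taken from the driver): the ARQ is
 * typically drained by looping on i40e_clean_arq_element() with a
 * caller-owned message buffer until no further work is reported. The
 * buffer size here is an arbitrary example value.
 *
 *        struct i40e_arq_event_info event;
 *        u16 pending = 0;
 *
 *        event.buf_len = 4096;
 *        event.msg_buf = caller_allocated_buffer;
 *        do {
 *                if (i40e_clean_arq_element(hw, &event, &pending) ==
 *                    I40E_ERR_ADMIN_QUEUE_NO_WORK)
 *                        break;
 *                ... act on event.desc.opcode, event.msg_buf, event.msg_len ...
 *        } while (pending);
 */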
1165