/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
 * @desc: API request descriptor
 **/
static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase)) ||
	       (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}
/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}
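/*
 * Worked sizing example (illustrative only): the two ring allocations above
 * are plain arithmetic on the caller-supplied entry counts.  With 32 ASQ
 * entries and the 32-byte struct i40e_aq_desc used by this hardware, the
 * descriptor ring is
 *
 *	32 entries * 32 bytes = 1024 bytes
 *
 * of physically contiguous, I40E_ADMINQ_DESC_ALIGNMENT-aligned DMA memory.
 * The parallel i40e_asq_cmd_details array is ordinary virtual memory because
 * the firmware never reads it; only the driver does.
 */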
/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
	else
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
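/*
 * Sketch of the LEN register encoding used by i40e_config_asq_regs() above
 * (and i40e_config_arq_regs() below): the low bits of ATQLEN/ARQLEN carry
 * the entry count and the high bit enables the queue.  Assuming the enable
 * bit is bit 31, which is what the *_ATQENABLE_MASK name suggests, a
 * 32-entry ASQ would be enabled with:
 *
 *	wr32(hw, hw->aq.asq.len,
 *	     32 | I40E_PF_ATQLEN_ATQENABLE_MASK);	(value 0x80000020)
 *
 * Writing 0 to the same register, as the shutdown paths do, disables the
 * queue again.
 */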
/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
	else
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}
/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}
/**
 * i40e_resume_aq - resume AQ processing from 0
 * @hw: pointer to the hardware structure
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}

/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 * - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	enum i40e_status_code ret_code;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->aq.nvm_release_on_done = FALSE;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
	/* a failure here is non-fatal; the result is deliberately ignored */
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
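/*
 * Typical driver-side usage of i40e_init_adminq() (a hedged sketch, not code
 * taken from any particular driver): the four sizing fields below are exactly
 * the ones validated at the top of the function.
 *
 *	hw->aq.num_asq_entries = 32;
 *	hw->aq.num_arq_entries = 32;
 *	hw->aq.asq_buf_size = 512;
 *	hw->aq.arq_buf_size = 512;
 *	ret = i40e_init_adminq(hw);
 *	if (ret != I40E_SUCCESS)
 *		... handle I40E_ERR_CONFIG, I40E_ERR_ADMIN_QUEUE_TIMEOUT or
 *		    I40E_ERR_FIRMWARE_API_VERSION ...
 *
 * i40e_shutdown_adminq() below is the matching teardown call.
 */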
/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, TRUE);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns TRUE if the firmware has processed all descriptors on the
 * admin send queue.  Returns FALSE if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
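/*
 * Hedged sketch of the asynchronous completion path that i40e_clean_asq()
 * above enables: a caller may fill in i40e_asq_cmd_details with async set
 * and a callback, in which case i40e_asq_send_command() returns without
 * polling and the callback later fires from i40e_clean_asq() with a copy of
 * the completed descriptor.  The callback name here is hypothetical.
 *
 *	static void my_aq_done(struct i40e_hw *hw, struct i40e_aq_desc *desc)
 *	{
 *		... inspect desc->retval and the cookie fields ...
 *	}
 *
 *	struct i40e_asq_cmd_details details = {0};
 *	details.async = TRUE;
 *	details.callback = (void *)my_aq_done;
 *	i40e_asq_send_command(hw, &desc, NULL, 0, &details);
 */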
/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command routine for the Admin Send Queue.  It
 * reclaims completed descriptors, posts the new command and, unless the
 * caller requested asynchronous handling, waits for the firmware write-back.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
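/*
 * Minimal direct-command sketch built from the two routines above (the
 * opcode is illustrative; production callers normally go through the
 * i40e_aq_* wrappers in i40e_common.c):
 *
 *	struct i40e_aq_desc desc;
 *	enum i40e_status_code status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	... on I40E_SUCCESS the write-back data is in desc.params ...
 *
 * Indirect commands pass a non-NULL buff and buff_size instead, and
 * i40e_asq_send_command() copies the buffer into and back out of the ring's
 * DMA memory.
 */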
/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through 'e'.  It can also report how many events are
 * left to process through 'pending'.
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	if (i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
	else
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	if (i40e_is_nvm_update_op(&e->desc)) {
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = FALSE;
		}

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;

		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;

		default:
			break;
		}
	}

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
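/*
 * Hedged sketch of the consumer side of i40e_clean_arq_element(): drivers
 * typically drain the ARQ in a loop like the one below, with a caller-
 * allocated message buffer sized to match hw->aq.arq_buf_size.
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = <caller-allocated buffer of buf_len bytes>;
 *	do {
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;	(I40E_ERR_ADMIN_QUEUE_NO_WORK when empty)
 *		... dispatch on LE16_TO_CPU(event.desc.opcode) ...
 *	} while (pending);
 */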