/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
 * @desc: API request descriptor
 **/
static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
		desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len = I40E_VF_ATQLEN1;
		hw->aq.asq.bal = I40E_VF_ATQBAL1;
		hw->aq.asq.bah = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len = I40E_VF_ARQLEN1;
		hw->aq.arq.bal = I40E_VF_ARQBAL1;
		hw->aq.arq.bah = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len = I40E_PF_ATQLEN;
		hw->aq.asq.bal = I40E_PF_ATQBAL;
		hw->aq.asq.bah = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len = I40E_PF_ARQLEN;
		hw->aq.arq.bal = I40E_PF_ARQBAL;
		hw->aq.arq.bah = I40E_PF_ARQBAH;
	}
}
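/*
 * Illustrative note (an assumption of this write-up, not driver code):
 * caching the offsets above lets the rest of this file drive PF and VF
 * queues through one code path, e.g.
 *
 *	wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
 *	val = rd32(hw, hw->aq.asq.head);
 *
 * instead of selecting between I40E_PF_ATQT and I40E_VF_ATQT1 at every
 * call site.
 */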
/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
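/*
 * Sketch of one pre-posted receive descriptor as built above (field
 * values shown for illustration; firmware reads the buffer size from
 * datalen since there is no buffer-size register):
 *
 *	flags   = I40E_AQ_FLAG_BUF (ORed with I40E_AQ_FLAG_LB for buffers
 *	          larger than I40E_AQ_LARGE_BUF)
 *	datalen = hw->aq.arq_buf_size
 *	addr_high/addr_low = upper/lower 32 bits of the buffer's bus address
 */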
/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
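/*
 * Hedged example of the length-register write above, assuming a ring of
 * 32 descriptors; the enable mask sets the queue-enable bit alongside
 * the ring size:
 *
 *	wr32(hw, hw->aq.asq.len, 32 | I40E_PF_ATQLEN_ATQENABLE_MASK);
 *
 * Writing 0 to the same register (see i40e_shutdown_asq()) disables the
 * queue again.
 */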
/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}
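/*
 * Minimal caller-side sketch for i40e_init_asq(); the entry count and
 * buffer size are illustrative values, not requirements:
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.asq_buf_size = 512;
 *	if (i40e_init_asq(hw) != I40E_SUCCESS)
 *		goto fail;
 *
 * Most drivers do not call this directly; i40e_init_adminq() below does
 * it as part of bringing up both queues.
 */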
/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}
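/*
 * Note: i40e_shutdown_asq() and i40e_shutdown_arq() above only disable
 * the rings and free driver memory; telling the firmware that the
 * driver is going away is done separately via i40e_aq_queue_shutdown()
 * in i40e_shutdown_adminq() below.
 */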
/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	u16 eetrack_lo, eetrack_hi;
	u16 cfg_ptr, oem_hi, oem_lo;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize spin locks */
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->aq.nvm_release_on_done = FALSE;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
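/*
 * Hedged usage sketch from a hypothetical attach/probe path; the sizes
 * are examples only:
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = 512;
 *	hw->aq.arq_buf_size = 512;
 *	if (i40e_init_adminq(hw) != I40E_SUCCESS)
 *		goto fail;
 *	...
 *	i40e_shutdown_adminq(hw);	(on detach)
 */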
/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, TRUE);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the spinlocks */
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);

	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns TRUE if the firmware has processed all descriptors on the
 * admin send queue. Returns FALSE if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
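/*
 * Worked example: after posting one command on an idle queue,
 * next_to_use is 1 while the head register still reads 0, so
 * i40e_asq_done() returns FALSE; once firmware consumes the descriptor
 * and advances head to 1, it returns TRUE.
 */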
/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16 buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16 retval = 0;
	u32 val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie. The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}
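/*
 * Note on locking: the asq_spinlock is held for the whole synchronous
 * wait above (the "ugh! delay while spin_lock" loop), so a slow command
 * can block other senders for up to hw->aq.asq_cmd_timeout iterations of
 * a 1 ms delay. Callers that cannot tolerate that can set details->async
 * (and optionally details->postpone) and reap completions later through
 * the callback path in i40e_clean_asq().
 */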
/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
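/*
 * Illustrative sketch of a direct (no-buffer) command built with the
 * helper above; the opcode is a placeholder and this mirrors what
 * wrappers such as i40e_aq_queue_shutdown() do:
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, opcode);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * Indirect commands pass a buffer and its size instead of NULL/0, and
 * the caller is expected to set I40E_AQ_FLAG_BUF (plus I40E_AQ_FLAG_RD
 * when the buffer carries data to the firmware) in desc.flags.
 */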
/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	if (!i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	if (i40e_is_nvm_update_op(&e->desc)) {
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = FALSE;
		}

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;

		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;

		default:
			break;
		}
	}

	return ret_code;
}
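/*
 * Hedged caller-side sketch of draining the event queue; the buffer
 * handling is illustrative and the caller owns msg_buf:
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending = 0;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = <caller-allocated buffer of buf_len bytes>;
 *	do {
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;	(returns I40E_ERR_ADMIN_QUEUE_NO_WORK
 *				 when the queue is empty)
 *		<dispatch on LE16_TO_CPU(event.desc.opcode)>
 *	} while (pending);
 */

/**
 * i40e_resume_aq - resume AQ processing after reset
 * @hw: pointer to the hardware structure
 *
 * Reprogram the ASQ/ARQ base registers (cleared by a PF reset) and reset
 * the driver's ring indices; used by the retry loop in i40e_init_adminq().
 **/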
void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}