// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 iavf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 iavf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * iavf_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
static void iavf_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * iavf_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
static void iavf_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	enum iavf_status ret_code;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
					  (hw->aq.num_arq_entries *
					  sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
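		/* clear the cookie and point firmware at this entry's DMA
		 * receive buffer
		 */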
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
	struct iavf_dma_mem *bi;
	enum iavf_status ret_code;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * iavf_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void iavf_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * iavf_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void iavf_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * iavf_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, IAVF_VF_ATQH1, 0);
	wr32(hw, IAVF_VF_ATQT1, 0);

	/* set starting point */
	wr32(hw, IAVF_VF_ATQLEN1, (hw->aq.num_asq_entries |
				  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
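	/* program the 64-bit descriptor ring base address (low and high
	 * halves)
	 */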
	wr32(hw, IAVF_VF_ATQBAL1, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, IAVF_VF_ATQBAH1, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, IAVF_VF_ATQBAL1);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * iavf_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, IAVF_VF_ARQH1, 0);
	wr32(hw, IAVF_VF_ARQT1, 0);

	/* set starting point */
	wr32(hw, IAVF_VF_ARQLEN1, (hw->aq.num_arq_entries |
				  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, IAVF_VF_ARQBAL1, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, IAVF_VF_ARQBAH1, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, IAVF_VF_ARQT1, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, IAVF_VF_ARQBAL1);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * iavf_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	int i;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_asq_regs(hw);
	if (ret_code)
		goto init_free_asq_bufs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_free_asq_bufs:
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

init_adminq_free_rings:
	iavf_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * iavf_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
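 * This is the queue on which the PF and firmware post replies and events
 * back to the VF driver.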
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	int i;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_arq_regs(hw);
	if (ret_code)
		goto init_free_arq_bufs;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_free_arq_bufs:
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
	iavf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * iavf_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, IAVF_VF_ATQH1, 0);
	wr32(hw, IAVF_VF_ATQT1, 0);
	wr32(hw, IAVF_VF_ATQLEN1, 0);
	wr32(hw, IAVF_VF_ATQBAL1, 0);
	wr32(hw, IAVF_VF_ATQBAH1, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}

/**
 * iavf_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, IAVF_VF_ARQH1, 0);
	wr32(hw, IAVF_VF_ARQT1, 0);
	wr32(hw, IAVF_VF_ARQLEN1, 0);
	wr32(hw, IAVF_VF_ARQBAL1, 0);
	wr32(hw, IAVF_VF_ARQBAH1, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}

/**
 * iavf_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *  - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = iavf_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = iavf_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	iavf_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}

/**
 * iavf_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	iavf_shutdown_asq(hw);
	iavf_shutdown_arq(hw);

	return 0;
}

/**
 * iavf_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
static u16 iavf_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &hw->aq.asq;
	struct iavf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct iavf_aq_desc desc_cb;
	struct iavf_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, IAVF_VF_ATQH1) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, IAVF_VF_ATQH1));

		if (details->callback) {
			IAVF_ADMINQ_CALLBACK cb_func =
				(IAVF_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct iavf_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}

/**
 * iavf_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, IAVF_VF_ATQH1) == hw->aq.asq.next_to_use;
}

/**
 * iavf_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue.  It runs the queue, cleans the queue, etc
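 *
 * The asq_mutex is taken internally; unless the async or postpone flags are
 * set in @cmd_details, the routine busy-waits for firmware write back of the
 * descriptor before returning.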
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
				       struct iavf_aq_desc *desc,
				       void *buff, /* can be NULL */
				       u16 buff_size,
				       struct iavf_asq_cmd_details *cmd_details)
{
	struct iavf_dma_mem *dma_buff = NULL;
	struct iavf_asq_cmd_details *details;
	struct iavf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum iavf_status status = 0;
	u16 retval = 0;
	u32 val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = IAVF_AQ_RC_OK;

	val = rd32(hw, IAVF_VF_ATQH1);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie. The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct iavf_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = IAVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = IAVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (iavf_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = IAVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff) {
		dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
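	/* the ATQT1 tail write below is what actually hands the staged
	 * descriptor (and any indirect buffer) to firmware
	 */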
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, IAVF_VF_ATQT1, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			udelay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
			status = 0;
		else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
			status = IAVF_ERR_NOT_READY;
		else
			status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

/**
 * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
}

/**
 * iavf_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e.  It can also return how many events are
 * left to process through 'pending'
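 *
 * Before returning, the cleaned descriptor and its buffer are re-armed and
 * handed back to firmware via the ARQ tail register.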
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
					struct iavf_arq_event_info *e,
					u16 *pending)
{
	u16 ntc = hw->aq.arq.next_to_clean;
	struct iavf_aq_desc *desc;
	enum iavf_status ret_code = 0;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = IAVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, IAVF_VF_ARQH1) & IAVF_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & IAVF_AQ_FLAG_ERR) {
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct iavf_aq_desc));

	desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, IAVF_VF_ARQT1, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}