// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)					\
do {									\
	(qinfo)->sq.head = prefix##_ATQH;				\
	(qinfo)->sq.tail = prefix##_ATQT;				\
	(qinfo)->sq.len = prefix##_ATQLEN;				\
	(qinfo)->sq.bah = prefix##_ATQBAH;				\
	(qinfo)->sq.bal = prefix##_ATQBAL;				\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;		\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;		\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;			\
	(qinfo)->rq.head = prefix##_ARQH;				\
	(qinfo)->rq.tail = prefix##_ARQT;				\
	(qinfo)->rq.len = prefix##_ARQLEN;				\
	(qinfo)->rq.bah = prefix##_ARQBAH;				\
	(qinfo)->rq.bal = prefix##_ARQBAL;				\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;		\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;		\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;			\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_sb_init_regs - Initialize Sideband registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_sb_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->sbq;

	ICE_CQ_INIT_REGS(cq, PF_SB);
}

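/*
 * For reference, ICE_CQ_INIT_REGS() relies on token pasting, so the prefix
 * argument selects a whole register family. A minimal sketch of what the
 * AdminQ case above expands to (PF_FW prefix, send-queue fields only):
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->sq.tail = PF_FW_ATQT;
 *	cq->sq.len  = PF_FW_ATQLEN;
 *	...
 *
 * and likewise PF_MBX_* / PF_SB_* for the mailbox and sideband queues.
 */
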
/**
 * ice_check_sq_alive - check if the Control Send Queue is enabled
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct libie_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return -ENOMEM;
	cq->sq.desc_buf.size = size;

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct libie_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return -ENOMEM;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return -ENOMEM;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct libie_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > LIBIE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with control queue design; there is
		 * no register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return -ENOMEM;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return -ENOMEM;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return -ENOMEM;
}

/**
 * ice_cfg_cq_regs - setup registers for a control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Configure base address and length registers for the given ring and verify
 * that the configuration was applied.
 */
static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return -EIO;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)						\
do {										\
	/* free descriptors */							\
	if ((qi)->ring.r.ring##_bi) {						\
		int i;								\
										\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
			if ((qi)->ring.r.ring##_bi[i].pa) {			\
				dmam_free_coherent(ice_hw_to_dev(hw),		\
						   (qi)->ring.r.ring##_bi[i].size, \
						   (qi)->ring.r.ring##_bi[i].va, \
						   (qi)->ring.r.ring##_bi[i].pa); \
				(qi)->ring.r.ring##_bi[i].va = NULL;		\
				(qi)->ring.r.ring##_bi[i].pa = 0;		\
				(qi)->ring.r.ring##_bi[i].size = 0;		\
			}							\
	}									\
	/* free DMA head */							\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);			\
} while (0)

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize receive side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Receive side of a control queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the transmit side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = -EBUSY;
		goto shutdown_sq_out;
	}

	/* Stop processing of the control queue */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load, 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
	u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);

	if (hw->api_maj_ver > exp_fw_api_ver_major) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == exp_fw_api_ver_major) {
		if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 exp_fw_api_ver_major, exp_fw_api_ver_minor);
		else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 exp_fw_api_ver_major, exp_fw_api_ver_minor);
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
			 hw->api_maj_ver, hw->api_min_ver,
			 exp_fw_api_ver_major, exp_fw_api_ver_minor);
	}
	return true;
}

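/*
 * Worked example of the minor-version tolerance above, assuming the expected
 * API version for this MAC is 1.5 (purely illustrative numbers): a reported
 * major version above 1 refuses to load; 1.8 or newer loads but logs the
 * "newer NVM" message (minor more than 2 above expected); 1.2 or older loads
 * but logs the "older NVM" message (minor more than 2 below expected); and
 * 1.3 through 1.7 load silently. A reported major version below 1 also loads,
 * with the "older NVM" message. The expected values themselves come from
 * EXP_FW_API_VER_*_BY_MAC().
 */
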
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = -EBUSY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static int ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	int status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = -EIO;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	int ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_SB:
		ice_sb_init_regs(hw);
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return -EINVAL;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return -EIO;
	}

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_is_sbq_supported - is the sideband queue supported
 * @hw: pointer to the hardware structure
 *
 * Returns true if the sideband control queue interface is
 * supported for the device, false otherwise
 */
bool ice_is_sbq_supported(struct ice_hw *hw)
{
	/* The device sideband queue is only supported on devices with the
	 * generic MAC type.
	 */
	return ice_is_generic_mac(hw);
}

/**
 * ice_get_sbq - returns the right control queue to use for sideband
 * @hw: pointer to the hardware structure
 */
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
{
	if (ice_is_sbq_supported(hw))
		return &hw->sbq;
	return &hw->adminq;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
			       bool unloading)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, unloading);
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
	/* Shutdown PHY Sideband */
	if (ice_is_sbq_supported(hw))
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
int ice_init_all_ctrlq(struct ice_hw *hw)
{
	u32 retry = 0;
	int status;

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != -EIO)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* The sideband control queue (SBQ) interface is not supported on some
	 * devices. Initialize it if supported, else fall back to the admin
	 * queue interface.
	 */
	if (ice_is_sbq_supported(hw)) {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
		if (status)
			return status;
	}
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
int ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_init_ctrlq_locks(&hw->sbq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

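/*
 * A minimal, hedged sketch of the expected lifecycle; the entry counts and
 * buffer sizes are illustrative only and are normally filled in by the caller
 * elsewhere in the driver before reaching this file:
 *
 *	hw->adminq.num_sq_entries = 32;
 *	hw->adminq.num_rq_entries = 32;
 *	hw->adminq.sq_buf_size = 4096;
 *	hw->adminq.rq_buf_size = 4096;
 *	(and likewise for hw->mailboxq / hw->sbq)
 *
 *	ice_create_all_ctrlq(hw);		// once at probe: locks + queues
 *	...
 *	ice_shutdown_all_ctrlq(hw, false);	// e.g. before a reset
 *	ice_init_all_ctrlq(hw);			// re-init after the reset
 *	...
 *	ice_destroy_all_ctrlq(hw);		// once at remove: queues + locks
 */
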
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw, true);

	ice_destroy_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_destroy_ctrlq_locks(&hw->sbq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans send side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors.
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct libie_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_ctl_q_str - Convert control queue type to string
 * @qtype: the control queue type
 *
 * Return: A string name for the given control queue type.
 */
static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
{
	switch (qtype) {
	case ICE_CTL_Q_UNKNOWN:
		return "Unknown CQ";
	case ICE_CTL_Q_ADMIN:
		return "AQ";
	case ICE_CTL_Q_MAILBOX:
		return "MBXQ";
	case ICE_CTL_Q_SB:
		return "SBQ";
	default:
		return "Unrecognized CQ";
	}
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 * @response: true if this is the writeback response
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
			 void *desc, void *buf, u16 buf_len, bool response)
{
	struct libie_aq_desc *cq_desc = desc;
	u16 datalen, flags;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	datalen = le16_to_cpu(cq_desc->datalen);
	flags = le16_to_cpu(cq_desc->flags);

	ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n\tcookie (h,l) 0x%08X 0x%08X\n\tparam (0,1) 0x%08X 0x%08X\n\taddr (h,l) 0x%08X 0x%08X\n",
		  ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
		  le16_to_cpu(cq_desc->opcode), flags, datalen,
		  le16_to_cpu(cq_desc->retval),
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low),
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1),
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	/* Dump buffer iff 1) one exists and 2) is either a response indicated
	 * by the DD and/or CMP flag set or a command with the RD flag set.
	 */
	if (buf && cq_desc->datalen &&
	    (flags & (LIBIE_AQ_FLAG_DD | LIBIE_AQ_FLAG_CMP |
		      LIBIE_AQ_FLAG_RD))) {
		char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 ";

		sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ",
			le32_to_cpu(cq_desc->params.generic.addr_high),
			le32_to_cpu(cq_desc->params.generic.addr_low));
		ice_debug_array_w_prefix(hw, ICE_DBG_AQ_DESC_BUF, prefix,
					 buf,
					 min_t(u16, buf_len, datalen));
	}
}

/**
 * ice_sq_done - poll until the last send on a control queue has completed
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Use read_poll_timeout to poll the control queue head, checking until it
 * matches next_to_use. According to the control queue designers, this has
 * better timing reliability than the DD bit.
 *
 * Return: true if all the descriptors on the send side of a control queue
 * are finished processing, false otherwise.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 head;

	/* Wait a short time before the initial check, to allow hardware time
	 * for completion.
	 */
	udelay(5);

	return !rd32_poll_timeout(hw, cq->sq.head,
				  head, head == cq->sq.next_to_use,
				  20, ICE_CTL_Q_SQ_CMD_TIMEOUT);
}

/**
 * ice_sq_send_cmd - send command to a control queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Main command for the transmit side of a control queue. It puts the command
 * on the queue, bumps the tail, waits for processing of the command, captures
 * command status and results, etc.
 */
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct libie_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct libie_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	int status = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return -EBUSY;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = LIBIE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = -EIO;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = -EINVAL;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = -EINVAL;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
		if (buf_size > LIBIE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = -EIO;
		goto sq_send_command_error;
	}

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = -ENOSPC;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
	ice_flush(hw);

	/* Wait for the command to complete. If it finishes within the
	 * timeout, copy the descriptor back to temp.
	 */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = -EIO;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != LIBIE_AQ_RC_OK)
			status = -EIO;
		cq->sq_last_status = (enum libie_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);

	/* save writeback AQ if requested */
	if (cd && cd->wb_desc)
		memcpy(cd->wb_desc, desc_on_ring, sizeof(*cd->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = -EIO;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = -EIO;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}

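/*
 * A hedged sketch of how a caller typically combines the helper above with
 * ice_sq_send_cmd(). The opcode choice is illustrative only; real callers
 * normally go through the ice_aq_send_cmd() wrapper in ice_common.c:
 *
 *	struct libie_aq_desc desc;
 *	int err;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	err = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *	if (err)
 *		return err;	// -EIO, -EBUSY, -ENOSPC, ...
 *
 *	// On success, desc now holds the firmware writeback and
 *	// hw->adminq.sq_last_status holds the AQ return code.
 */
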
/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * Clean one element from the receive side of a control queue. On return 'e'
 * contains contents of the message, and 'pending' contains the number of
 * events left to process.
 */
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	enum libie_aq_err rq_last_status;
	u16 ntc = cq->rq.next_to_clean;
	struct libie_aq_desc *desc;
	struct ice_dma_mem *bi;
	int ret_code = 0;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = -EIO;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = -EALREADY;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum libie_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & LIBIE_AQ_FLAG_ERR) {
		ret_code = -EIO;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode), rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > LIBIE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}

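/*
 * A minimal, hedged sketch of a consumer loop for the routine above; the real
 * users live in the driver's service task and supply a preallocated message
 * buffer sized to the control queue:
 *
 *	struct ice_rq_event_info event = {};
 *	u16 pending = 0;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return;
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;	// -EALREADY when the ring is empty
 *		// dispatch on le16_to_cpu(event.desc.opcode) here
 *	} while (pending);
 *	kfree(event.msg_buf);
 */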