// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_sb_init_regs - Initialize Sideband registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_sb_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->sbq;

	ICE_CQ_INIT_REGS(cq, PF_SB);
}
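
/*
 * For reference, ICE_CQ_INIT_REGS() relies on token pasting, so the
 * ICE_CQ_INIT_REGS(cq, PF_FW) call in ice_adminq_init_regs() resolves to
 * plain register assignments such as (abridged):
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->sq.tail = PF_FW_ATQT;
 *	cq->sq.len = PF_FW_ATQLEN;
 *	...
 *	cq->rq.head = PF_FW_ARQH;
 *
 * The mailbox and sideband variants differ only in the PF_MBX/PF_SB
 * register prefix.
 */
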
/**
 * ice_check_sq_alive - check if the Send Queue (ATQ) is alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the queue is enabled, false otherwise.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}
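
/*
 * Worked example (illustrative numbers): if the admin queue was brought up
 * with cq->num_sq_entries == 128, a healthy queue reads back
 * (128 | len_ena_mask) once the length and enable fields are masked out:
 *
 *	val = rd32(hw, cq->sq.len) & (cq->sq.len_mask | cq->sq.len_ena_mask);
 *	alive = (val == (128 | cq->sq.len_ena_mask));
 *
 * If firmware has since disabled or reconfigured the queue, the comparison
 * fails and the queue is reported as dead.
 */
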
/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return -ENOMEM;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return -ENOMEM;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return -ENOMEM;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return -ENOMEM;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return -ENOMEM;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return -ENOMEM;
}

/**
 * ice_cfg_cq_regs - program registers for a control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Clear the head and tail, then program the length/enable and base address
 * registers for the given ring and verify that the base took effect.
 */
static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return -EIO;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa) {		\
				dmam_free_coherent(ice_hw_to_dev(hw),	\
					(qi)->ring.r.ring##_bi[i].size,	\
					(qi)->ring.r.ring##_bi[i].va,	\
					(qi)->ring.r.ring##_bi[i].pa);	\
				(qi)->ring.r.ring##_bi[i].va = NULL;	\
				(qi)->ring.r.ring##_bi[i].pa = 0;	\
				(qi)->ring.r.ring##_bi[i].size = 0;	\
			}						\
	}								\
	/* free the buffer info list */					\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);		\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = -EBUSY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
	u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);

	if (hw->api_maj_ver > exp_fw_api_ver_major) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == exp_fw_api_ver_major) {
		if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}
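
/*
 * Worked example of the tolerance window above (versions are illustrative):
 * with an expected API version of 1.5, a reported 1.3 through 1.7 loads
 * silently, 1.8 or newer logs the "newer than expected" note, 1.2 or older
 * logs the "older than expected" note, a larger major version (2.x) refuses
 * to load, and a smaller major version (0.x) logs the "older" note but
 * still loads. Only the major-version-too-new case returns false.
 */
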
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = -EBUSY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it is alive
 * @hw: pointer to the hardware structure
 */
static int ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	int status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = -EIO;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	int ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_SB:
		ice_sb_init_regs(hw);
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return -EINVAL;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return -EIO;
	}

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}
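
/*
 * Illustrative sketch (not part of this file): before ice_init_ctrlq() or
 * ice_init_all_ctrlq() runs, the caller is expected to have filled in the
 * ring and buffer sizes on each queue, e.g. for the admin queue (the numbers
 * below are placeholders, not the driver's real defaults):
 *
 *	hw->adminq.num_sq_entries = 64;
 *	hw->adminq.num_rq_entries = 64;
 *	hw->adminq.sq_buf_size = 4096;
 *	hw->adminq.rq_buf_size = 4096;
 *
 * If any of the four fields is zero, ice_init_ctrlq() fails with -EIO.
 */
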
/**
 * ice_is_sbq_supported - is the sideband queue supported
 * @hw: pointer to the hardware structure
 *
 * Returns true if the sideband control queue interface is
 * supported for the device, false otherwise
 */
bool ice_is_sbq_supported(struct ice_hw *hw)
{
	/* The device sideband queue is only supported on devices with the
	 * generic MAC type.
	 */
	return ice_is_generic_mac(hw);
}

/**
 * ice_get_sbq - returns the right control queue to use for sideband
 * @hw: pointer to the hardware structure
 */
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
{
	if (ice_is_sbq_supported(hw))
		return &hw->sbq;
	return &hw->adminq;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
			       bool unloading)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, unloading);
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
	/* Shutdown PHY Sideband */
	if (ice_is_sbq_supported(hw))
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
int ice_init_all_ctrlq(struct ice_hw *hw)
{
	u32 retry = 0;
	int status;

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != -EIO)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;

	/* sideband control queue (SBQ) interface is not supported on some
	 * devices. Initialize if supported, else fall back to the admin queue
	 * interface
	 */
	if (ice_is_sbq_supported(hw)) {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
		if (status)
			return status;
	}

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
int ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_init_ctrlq_locks(&hw->sbq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw, true);

	ice_destroy_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_destroy_ctrlq_locks(&hw->sbq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}
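
/*
 * Lifecycle sketch (illustrative, error handling omitted): after the queue
 * sizes have been filled in (see the earlier sketch), a typical caller
 * creates the queues once at probe time, tears them down at remove time,
 * and across a reset only shuts down and re-initializes so the locks stay
 * intact:
 *
 *	err = ice_create_all_ctrlq(hw);		// driver load
 *	...
 *	ice_shutdown_all_ctrlq(hw, false);	// before a reset
 *	err = ice_init_all_ctrlq(hw);		// after the reset
 *	...
 *	ice_destroy_all_ctrlq(hw);		// driver unload
 */
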
/**
 * ice_clean_sq - clean the Admin Send Queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors.
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq - dump a control queue command to the debug log
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps the descriptor contents, and the command buffer if one is attached,
 * to the debug log.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = desc;
	u16 len;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It cleans completed
 * descriptors, posts the command to the queue, and waits for the firmware
 * writeback.
 */
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	struct ice_sq_cd *details;
	unsigned long timeout;
	int status = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return -EBUSY;

	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = -EIO;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = -EINVAL;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = -EINVAL;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = -EIO;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = -ENOSPC;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
	ice_flush(hw);

	/* Wait a short time before initial ice_sq_done() check, to allow
	 * hardware time for completion.
	 */
	udelay(5);

	timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
	do {
		if (ice_sq_done(hw, cq))
			break;

		usleep_range(100, 150);
	} while (time_before(jiffies, timeout));

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = -EIO;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = -EIO;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if a timeout occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = -EIO;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = -EIO;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
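
/*
 * Usage sketch (illustrative only; the opcode below is a placeholder, real
 * callers take theirs from ice_adminq_cmd.h): a direct command passes no
 * buffer, an indirect command passes its payload and size instead of NULL/0.
 *
 *	struct ice_aq_desc desc;
 *	int err;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, 0x0001);
 *	err = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *	if (err)
 *		return err;	// cq->sq_last_status holds the FW return code
 *	// on success, desc now holds the firmware writeback descriptor
 */
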
/**
 * ice_clean_rq_elem - clean one element from the Control Receive Queue (ARQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	int ret_code = 0;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = -EIO;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = -EALREADY;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = -EIO;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode), rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
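
/*
 * Consumption sketch (illustrative only; buffer sizing and error handling
 * are simplified): a receive-path handler typically drains the ARQ by
 * calling ice_clean_rq_elem() until it reports -EALREADY or no more pending
 * events remain.
 *
 *	struct ice_rq_event_info event = {};
 *	u16 pending = 0;
 *
 *	event.buf_len = cq->rq_buf_size;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return;
 *	do {
 *		if (ice_clean_rq_elem(hw, cq, &event, &pending))
 *			break;
 *		// dispatch on le16_to_cpu(event.desc.opcode) here
 *	} while (pending);
 *	kfree(event.msg_buf);
 */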