/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include <rdma/ib_addr.h>
#include "bnxt_ulp.h"
#include "bnxt_re.h"
#include "ib_verbs.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
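
/*
 * Deferred CQ notification work: runs from the NQ workqueue so the CQN
 * handler is invoked in process context.  arm_state is checked under
 * compl_lock to avoid racing with the NQ tasklet re-arming the CQ.
 */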
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}
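
/*
 * Scrub the NQ ring of notification entries that still reference the
 * given CQ: matching entries get their cq_handle cleared so a late
 * tasklet run cannot dereference a CQ being destroyed, and each match
 * is accounted in cq->cnq_events for __wait_for_all_nqes() below.
 */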
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}
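
/*
 * NQ tasklet handler: drains up to nq->budget valid entries from the
 * notification queue, dispatching CQ and SRQ events to the registered
 * handlers, and rings the NQ doorbell once at the end if anything was
 * polled.
 */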
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;
			struct bnxt_re_cq *cq_p;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
				      NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
			if (cq_p->uctx_cq_page)
				*((u32 *)cq_p->uctx_cq_page) = cq->toggle;

			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, (cq)))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct bnxt_re_srq *srq_p;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
				      >> NQ_CN_TOGGLE_SFT;
			srq->dbinfo.toggle = srq->toggle;
			srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
			if (srq_p->uctx_srq_page)
				*((u32 *)srq_p->uctx_srq_page) = srq->toggle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self polling notification queue.
 * @nq - notification queue pointer
 *
 * This function will start polling entries of a given notification queue
 * for all pending entries.
 * This function is useful to synchronize notification entries while resources
 * are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}
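
/*
 * Bring-up order matters here: the NQ doorbell must be mapped before
 * the IRQ is requested, because bnxt_qplib_nq_start_irq() arms the NQ
 * doorbell as its final step.
 */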
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;
	nq->load = 0;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}
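
/*
 * Create the SRQ in firmware.  For kernel consumers the software queue
 * doubles as a free list: swq[].next_idx links unused entries, and a
 * posted WR ID is parked in its slot until the completion returns it.
 */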
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	if (!srq->hwq.is_user) {
		srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
				   GFP_KERNEL);
		if (!srq->swq) {
			rc = -ENOMEM;
			goto fail;
		}
		for (idx = 0; idx < srq->hwq.max_elements; idx++)
			srq->swq[idx].next_idx = idx + 1;
		srq->swq[srq->last_idx].next_idx = -1;
	}

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}
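
/*
 * Post a receive WQE to the SRQ.  A slot is claimed from the software
 * free list under the hwq lock, and its index travels in wr_id[0] of
 * the hardware WQE so the completion path can release the slot.
 */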
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);

	return 0;
}

/* QP */

static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_sw_wqe - 1;
	for (indx = 0; indx < que->max_sw_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}
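
/*
 * QP1 (GSI) creation.  Unlike regular QPs, QP1 always gets DMA-coherent
 * header buffers (one per SQ/RQ entry) that the ULP uses to build and
 * receive the extra QP1 headers; no PSN search area is attached.
 */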
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}
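
/*
 * Create an RC/UD QP in firmware.  For RC QPs an auxiliary search area
 * is allocated alongside the SQ: classic PSN search entries, or an MSN
 * table on devices that retransmit in hardware (qp->is_host_msn_tbl).
 * ORRQ/IRRQ context memory backs the requested RD-atomic limits.
 */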
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (qp->is_host_msn_tbl) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
				    : 0;
	/* Update msn tbl size */
	if (qp->is_host_msn_tbl && psn_sz) {
		if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		else
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	if (!sq->hwq.is_user) {
		rc = bnxt_qplib_alloc_init_swq(sq);
		if (rc)
			goto fail_sq;

		if (psn_sz)
			bnxt_qplib_init_psn_ptr(qp, psn_sz);
	}
	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		if (!rq->hwq.is_user) {
			rc = bnxt_qplib_alloc_init_swq(rq);
			if (rc)
				goto fail_rq;
		}

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf))
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
	spin_unlock_bh(&rcfw->tbl_lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}
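
/*
 * The __modify_flags_from_*_state() helpers below trim the modify mask
 * to what firmware accepts for the current QP state and patch in the
 * defaults it insists on (path MTU, SGID index, RD-atomic minimums).
 */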
static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}
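
/*
 * When firmware supports the optimized modify-QP path, a few attributes
 * must accompany the INIT->RTR and RTR->RTS transitions even if the
 * caller left them unchanged; force the corresponding mask bits here.
 */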
static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
					  struct bnxt_qplib_qp *qp,
					  struct cmdq_modify_qp *req)
{
	u32 mandatory_flags = 0;

	if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;

	if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
	    qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) {
		if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC && qp->srq)
			req->flags = cpu_to_le16(CMDQ_MODIFY_QP_FLAGS_SRQ_USED);
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
	}

	if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
	    (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
		if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
			mandatory_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
	}

	if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
	    qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;

	qp->modify_flags |= mandatory_flags;
	req->qp_type = qp->type;
}

static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
{
	if ((qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) ||
	    (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS))
		return true;

	return false;
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u16 vlan_pcp_vlan_dei_vlan_id;
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	if (qp->modify_flags & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		/* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
		if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
		    is_optimized_state_transition(qp))
			bnxt_set_mandatory_attributes(res, qp, &req);
	}
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID) {
		vlan_pcp_vlan_dei_vlan_id =
			((res->sgid_tbl.tbl[qp->ah.sgid_index].vlan_id <<
			  CMDQ_MODIFY_QP_VLAN_ID_SFT) &
			 CMDQ_MODIFY_QP_VLAN_ID_MASK);
		vlan_pcp_vlan_dei_vlan_id |=
			((qp->ah.sl << CMDQ_MODIFY_QP_VLAN_PCP_SFT) &
			 CMDQ_MODIFY_QP_VLAN_PCP_MASK);
		req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(vlan_pcp_vlan_dei_vlan_id);
	}

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}
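
/*
 * Read back the QP context from firmware via a DMA side buffer and
 * unpack it into the qplib QP, translating the firmware SGID entry
 * back to an index into the host SGID table.
 */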
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
	qp->port_id = le16_to_cpu(sb->port_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}
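
/*
 * Peek (without consuming) every valid CQE in the ring and zero the
 * qp_handle of entries owned by the QP being cleaned, so later polling
 * skips them.  Callers hold the CQ flush locks.
 */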
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}
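
/*
 * Drop the QP from the RCFW qp_tbl before issuing the destroy command
 * so concurrent async-event handling cannot look up a dying QP; the
 * entry is restored if firmware rejects the command.
 */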
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
	spin_unlock_bh(&rcfw->tbl_lock);

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		spin_lock_bh(&rcfw->tbl_lock);
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		spin_unlock_bh(&rcfw->tbl_lock);
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle MSN differently on cap flags */
	if (qp->is_host_msn_tbl) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

static unsigned int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
					  struct bnxt_qplib_swqe *wqe,
					  u32 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return BNXT_RE_INVAL_MSG_SIZE;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
				      (sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

static unsigned int bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
					struct bnxt_qplib_sge *ssge,
					u32 nsge, u32 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}
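
/*
 * Work out how many 16-byte slots a WQE occupies.  Static WQE mode pads
 * every WQE to a fixed 8 slots (128B); variable mode charges only the
 * actual header plus SGE or inline payload size.  *qdf returns the
 * queue-full delta translated into slots for this WQE size.
 */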
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* sq_send_hdr is a misnomer here; the rq header size is the same. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq, bool hw_retx)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	if (hw_retx) {
		/* For HW retx use qp msn index */
		tail = qp->msn;
		tail %= qp->msn_tbl_sz;
	}
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}
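
/*
 * Post one WQE to the SQ.  Every WQE is laid out as a 16B base header
 * slot, a 16B extended header slot, then SGE/inline payload slots;
 * PSN/MSN search entries are filled only for wire WQEs (msn_update) so
 * the retransmission logic can locate them later.
 */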
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u32 wqe_idx, slots, idx;
	u16 wqe_sz, qdf = 0;
	bool msn_update;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len > BNXT_RE_MAX_MSG_SIZE) {
		rc = -EINVAL;
		goto done;
	}
	/* Make sure we update MSN table only for wired wqes */
	msn_update = true;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;

			/* Assemble info for Raw Ethertype QPs */
			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
			    SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
			    SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
			msn_update = false;
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
		msn_update = false;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
			  SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
			 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
			 SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
		msn_update = false;

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		msn_update = false;
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	if (!qp->is_host_msn_tbl || msn_update) {
		swq->next_psn = sq->psn & BTH_PSN_MASK;
		bnxt_qplib_fill_psn_search(qp, wqe, swq);
	}
queue_err:
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
	qp->wqe_cnt++;
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}
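/* RQ WQEs are fixed-size (rq->dbinfo.max_slot slots each), so unlike the SQ
 * path there is no per-WQE slot calculation below; a zero-SGE post is padded
 * with one empty SGE so the WQE is never header-only.
 */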
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u32 wqe_idx, idx;
	u16 wqe_sz;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

/* CQ */
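/* CREATE_CQ packs the base page size and the PBL indirection level into one
 * request field. Roughly (illustrative, matching the masks used below):
 * pg_size_lvl = (base_pg_size << PG_SIZE_SFT) | level, so a single-level
 * 4K-page queue would encode page-size code 0 with level PBL_LVL_0.
 */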
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 coalescing = 0;
	u32 pg_sz_lvl;
	int rc;

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}

	cq->dbinfo.flags = 0;
	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		return rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_CQ,
				 sizeof(req));

	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->max_wqe);

	if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2)) {
		req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
		coalescing |= ((cq->coalescing->buf_maxtime <<
				CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
			       CMDQ_CREATE_CQ_BUF_MAXTIME_MASK);
		coalescing |= ((cq->coalescing->normal_maxbuf <<
				CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
			       CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK);
		coalescing |= ((cq->coalescing->during_maxbuf <<
				CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
			       CMDQ_CREATE_CQ_DURING_MAXBUF_MASK);
		if (cq->coalescing->en_ring_idle_mode)
			coalescing |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
		else
			coalescing &= ~CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
		req.coalescing = cpu_to_le32(coalescing);
	}

	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	cq->dbinfo.flags = 0;
	cq->dbinfo.toggle = 0;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return rc;
}

void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq)
{
	bnxt_qplib_free_hwq(res, &cq->hwq);
	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
	/* Reset only the cons bit in the flags */
	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
}

int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_resize_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_resize_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz, lvl, new_sz;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_RESIZE_CQ,
				 sizeof(req));
	hwq_attr.sginfo = &cq->sg_info;
	hwq_attr.res = res;
	hwq_attr.depth = new_cqes;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
	if (rc)
		return rc;

	req.cq_cid = cpu_to_le32(cq->id);
	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
	       CMDQ_RESIZE_CQ_LVL_MASK;
	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_cq req = {};
	u16 total_cnq_events;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_CQ,
				 sizeof(req));

	req.cq_cid = cpu_to_le32(cq->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}
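/* Flush helpers: once a QP is in the error state, every outstanding SWQE and
 * RWQE must still be completed to the consumer. The two helpers below
 * fabricate CQEs with a FLUSHED_ERR status directly from the software
 * queues, consuming the poll budget just like hardware CQEs would.
 */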
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 rq->swq[last].slots, &rq->dbinfo.flags);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}

/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive);
 * CQEs are tracked from sw_cq_cons to max_element but valid only if VALID=1
 */
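/* Hardware workaround 9060 (pre-Gen-P5 silicon): a fence WQE may complete
 * out of band, leaving a "phantom" CQE that shows up later. Bit 31 of
 * flags_next_psn in the psn_search entry serves as the marker; when it is
 * seen, completion is paused (sq->condition) until the phantom CQE for the
 * fence WRID is observed, after which exactly one more WQE is completed
 * (sq->single) before normal processing resumes.
 */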
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Normal mode */
	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
			 & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_flags = cq->dbinfo.flags;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
						 &peek_sw_cq_cons,
						 1, &peek_flags);
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
{
	struct bnxt_qplib_hwq *sq_hwq;
	struct bnxt_qplib_swq *swq;
	int cqe_sq_cons = -1;
	u32 start, last;

	sq_hwq = &sq->hwq;

	start = sq->swq_start;
	last = sq->swq_last;

	while (last != start) {
		swq = &sq->swq[last];
		if (swq->slot_idx == cqe_slot) {
			cqe_sq_cons = swq->next_idx;
			dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
				__func__, cqe_sq_cons, cqe_slot);
			break;
		}

		last = swq->next_idx;
	}
	return cqe_sq_cons;
}
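/* REQ CQEs are aggregated: one hardware CQE may acknowledge a whole run of
 * SWQEs. The handler below therefore walks the software queue from the
 * current consumer up to the CQE's sq_cons_idx, emitting a CQE for every
 * signaled SWQE it passes (and the error status, if any, on the last one).
 */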
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	u32 cqe_sq_cons, slot_num;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	int cqe_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
		slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
		cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
		if (cqe_cons < 0) {
			dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
				__func__, slot_num);
			goto done;
		}
		cqe_sq_cons = cqe_cons;
		dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
			__func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
	}

	/* We need to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation, from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status. For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
				if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
					      cqe_sq_cons)) {
					*lib_qp = qp;
					goto out;
				}
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 swq->slots, &sq->dbinfo.flags);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
	spin_unlock(&srq->hwq.lock);
}
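/* Responder-side handlers: the wr_id index in the hwcqe either points into
 * the SRQ (flagged via CQ_RES_*_FLAGS_SRQ_SRQ) or must match the RQ's
 * software consumer exactly, since RQ completions arrive strictly in order.
 */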
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
		      ((le32_to_cpu(
		      hwcqe->src_qp_high_srq_or_rq_wr_id) &
		       CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
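/* CQE validity is tracked with an epoch bit rather than a consumer
 * write-back: CQE_CMP_VALID() compares the toggle bit in cqe_type_toggle
 * against the epoch kept in dbinfo.flags, which bnxt_qplib_hwq_incr_cons()
 * flips each time the consumer index wraps around the queue.
 */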
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	bool rc = true;

	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
	/* Check for the valid bit; if the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
	return rc;
}

static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
		& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, ETH_ALEN);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp)
		return -EINVAL;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_sw_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[swq_last].slots,
					 &sq->dbinfo.flags);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		rc = -EINVAL;
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod, regardless of what
	 * rq->cons the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	u32 budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}
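/* Poll loop contract (a sketch of how bnxt_re_poll_cq() in ib_verbs.c is
 * expected to drive this): the caller passes a CQE array of num_cqes
 * entries and the return value is how many were filled. A partially
 * drained CQ simply returns early, and *lib_qp is set when WA9060 needs
 * the caller to revisit that QP's send queue.
 */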
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	int budget, rc = 0;
	u32 hw_polled = 0;
	u8 type;

	budget = num_cqes;

	while (budget) {
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* From the device's respective CQE format to qplib_wc */
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       cq->hwq.cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%x\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
					 1, &cq->dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
exit:
	return num_cqes - budget;
}

void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	cq->dbinfo.toggle = cq->toggle;
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
}

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}