/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/prefetch.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->hwq.max_elements * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->hwq.max_elements * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->hwq.max_elements *
					qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"QPLIB: Failed to create sq_hdr_buf");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->hwq.max_elements *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"QPLIB: Failed to create rq_hdr_buf");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

static void bnxt_qplib_service_nq(unsigned long data)
{
	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int num_cqne_processed = 0;
	u32 sw_cons, raw_cons;
	u16 type;
	int budget = nq->budget;
	u64 q_handle;

	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			bnxt_qplib_arm_cq_enable((struct bnxt_qplib_cq *)
						 ((unsigned long)q_handle));
			if (!nq->cqn_handler(nq, (struct bnxt_qplib_cq *)
						 ((unsigned long)q_handle)))
				num_cqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "QPLIB: cqn - type 0x%x not handled",
					 type);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "QPLIB: nqe with type = 0x%x not handled",
				 type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
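		/* Publish the new consumer index and rearm the NQ
		 * doorbell so the HW can raise the next notification
		 * interrupt from this position.
		 */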
		NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
	}
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base **nq_ptr;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
	prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->worker);

	return IRQ_HANDLED;
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	/* Make sure the HW is stopped! */
	synchronize_irq(nq->vector);
	tasklet_disable(&nq->worker);
	tasklet_kill(&nq->worker);

	if (nq->requested) {
		free_irq(nq->vector, nq);
		nq->requested = false;
	}
	if (nq->bar_reg_iomem)
		iounmap(nq->bar_reg_iomem);
	nq->bar_reg_iomem = NULL;

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->vector = 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int msix_vector, int bar_reg_offset,
			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
					    struct bnxt_qplib_cq *),
			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
					     void *, u8 event))
{
	resource_size_t nq_base;
	int rc;

	nq->pdev = pdev;
	nq->vector = msix_vector;

	nq->cqn_handler = cqn_handler;

	nq->srqn_handler = srqn_handler;

	tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);

	nq->requested = false;
	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, "bnxt_qplib_nq", nq);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request IRQ for NQ: %#x", rc);
		goto fail;
	}
	nq->requested = true;
	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
	nq->bar_reg_off = bar_reg_offset;
	nq_base = pci_resource_start(pdev, nq->bar_reg);
	if (!nq_base) {
		rc = -ENOMEM;
		goto fail;
	}
	nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
	if (!nq->bar_reg_iomem) {
		rc = -ENOMEM;
		goto fail;
	}
	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements)
		bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
}

int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
{
	nq->pdev = pdev;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
				      &nq->hwq.max_elements,
				      BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
				      PAGE_SIZE, HWQ_TYPE_L2_CMPL))
		return -ENOMEM;

	nq->budget = 8;
	return 0;
}
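
/* A minimal bring-up sketch for the NQ, assuming the caller owns a
 * struct bnxt_qplib_nq embedded in its device state and supplies its
 * own my_cqn_handler/my_srqn_handler callbacks (placeholder names):
 *
 *	rc = bnxt_qplib_alloc_nq(pdev, nq);
 *	if (!rc)
 *		rc = bnxt_qplib_enable_nq(pdev, nq, msix_vector, bar_off,
 *					  my_cqn_handler, my_srqn_handler);
 *	...
 *	bnxt_qplib_disable_nq(nq);
 *	bnxt_qplib_free_nq(nq);
 *
 * Allocation must precede enable, and disable must precede free.
 */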

/* QP */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_qp1 req;
	struct creq_create_qp1_resp *resp;
	struct bnxt_qplib_pbl *pbl;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	int rc;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;

	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	sq->hwq.max_elements = sq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
				       &sq->hwq.max_elements,
				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
	if (!sq->swq) {
		rc = -ENOMEM;
		goto fail_sq;
	}
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.sq_pg_size_sq_lvl =
		((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
				<< CMDQ_CREATE_QP1_SQ_LVL_SFT) |
		(pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
		 pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
		 pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
		 pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
		 pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
		 pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
		 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);

	if (qp->scq)
		req.scq_cid = cpu_to_le32(qp->scq->id);

	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;

	/* RQ */
	if (rq->max_wqe) {
		rq->hwq.max_elements = qp->rq.max_wqe;
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
					       &rq->hwq.max_elements,
					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
					       PAGE_SIZE, HWQ_TYPE_QUEUE);
		if (rc)
			goto fail_sq;

		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
				  GFP_KERNEL);
		if (!rq->swq) {
			rc = -ENOMEM;
			goto fail_rq;
		}
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		req.rq_pg_size_rq_lvl =
			((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
					CMDQ_CREATE_QP1_RQ_LVL_SFT) |
			(pbl->pg_size == ROCE_PG_SIZE_4K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
			 pbl->pg_size == ROCE_PG_SIZE_8K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
			 pbl->pg_size == ROCE_PG_SIZE_64K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
			 pbl->pg_size == ROCE_PG_SIZE_2M ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
			 pbl->pg_size == ROCE_PG_SIZE_8M ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
			 pbl->pg_size == ROCE_PG_SIZE_1G ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
			 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
		if (qp->rcq)
			req.rcq_cid = cpu_to_le32(qp->rcq->id);
	}

	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto fail;
	}
	req.qp_flags = cpu_to_le32(qp_flags);
	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
	req.rq_size = cpu_to_le32(rq->hwq.max_elements);

	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.rq_fwo_rq_sge =
		cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_RQ_SGE_SFT);

	req.pd_id = cpu_to_le32(qp->pd->id);

	resp = (struct creq_create_qp1_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
	if (!resp) {
		dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed");
		rc = -EINVAL;
		goto fail;
	}
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out");
		rc = -ETIMEDOUT;
		goto fail;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed ");
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		rc = -EINVAL;
		goto fail;
	}
	qp->id = le32_to_cpu(resp->xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	sq->flush_in_progress = false;
	rq->flush_in_progress = false;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
	kfree(rq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
	kfree(sq->swq);
exit:
	return rc;
}
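
/* Every verb in this file follows the same RCFW command pattern:
 * RCFW_CMD_PREP() builds the request and assigns a cookie,
 * bnxt_qplib_rcfw_send_message() posts it to the FW command queue,
 * bnxt_qplib_rcfw_wait_for_resp() blocks on that cookie, and the
 * completion is then checked for a non-zero status or a cookie
 * mismatch before the response payload is trusted.
 */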

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
	struct cmdq_create_qp req;
	struct creq_create_qp_resp *resp;
	struct bnxt_qplib_pbl *pbl;
	struct sq_psn_search **psn_search_ptr;
	unsigned long int psn_search, poff = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_hwq *xrrq;
	int i, rc, req_size, psn_sz;
	u16 cmd_flags = 0, max_ssge;
	u32 sw_prod, qp_flags = 0;

	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
		 sizeof(struct sq_psn_search) : 0;
	sq->hwq.max_elements = sq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
				       sq->nmap, &sq->hwq.max_elements,
				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
				       psn_sz,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
	if (!sq->swq) {
		rc = -ENOMEM;
		goto fail_sq;
	}
	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	if (psn_sz) {
		psn_search_ptr = (struct sq_psn_search **)
				  &hw_sq_send_ptr[get_sqe_pg
					(sq->hwq.max_elements)];
		psn_search = (unsigned long int)
			      &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
			      [get_sqe_idx(sq->hwq.max_elements)];
		if (psn_search & ~PAGE_MASK) {
			/* If the psn_search does not start on a page boundary,
			 * then calculate the offset
			 */
			poff = (psn_search & ~PAGE_MASK) /
				BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
		}
		for (i = 0; i < sq->hwq.max_elements; i++)
			sq->swq[i].psn_search =
				&psn_search_ptr[get_psne_pg(i + poff)]
					       [get_psne_idx(i + poff)];
	}
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.sq_pg_size_sq_lvl =
		((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
				 << CMDQ_CREATE_QP_SQ_LVL_SFT) |
		(pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
		 pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
		 pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
		 pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
		 pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
		 pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
		 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);

	/* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
		hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
						[get_sqe_idx(sw_prod)];
		hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
	}

	if (qp->scq)
		req.scq_cid = cpu_to_le32(qp->scq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;

	/* RQ */
	if (rq->max_wqe) {
		rq->hwq.max_elements = rq->max_wqe;
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
					       rq->nmap, &rq->hwq.max_elements,
					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
					       PAGE_SIZE, HWQ_TYPE_QUEUE);
		if (rc)
			goto fail_sq;

		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
				  GFP_KERNEL);
		if (!rq->swq) {
			rc = -ENOMEM;
			goto fail_rq;
		}
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		req.rq_pg_size_rq_lvl =
			((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
				CMDQ_CREATE_QP_RQ_LVL_SFT) |
			(pbl->pg_size == ROCE_PG_SIZE_4K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
			 pbl->pg_size == ROCE_PG_SIZE_8K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
			 pbl->pg_size == ROCE_PG_SIZE_64K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
			 pbl->pg_size == ROCE_PG_SIZE_2M ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
			 pbl->pg_size == ROCE_PG_SIZE_8M ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
			 pbl->pg_size == ROCE_PG_SIZE_1G ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
			 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
	}

	if (qp->rcq)
		req.rcq_cid = cpu_to_le32(qp->rcq->id);
	req.qp_flags = cpu_to_le32(qp_flags);
	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf = NULL;

	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc)
		goto fail_rq;

	/* CTRL-22434: Irrespective of the requested SGE count on the SQ
	 * always create the QP with max send sges possible if the requested
	 * inline size is greater than 0.
	 */
	max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
	req.sq_fwo_sq_sge = cpu_to_le16(
				((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
				 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.rq_fwo_rq_sge = cpu_to_le16(
				((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
				 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
					       &xrrq->max_elements,
					       BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
					       0, req_size, HWQ_TYPE_CTX);
		if (rc)
			goto fail_buf_free;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);

		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
					       &xrrq->max_elements,
					       BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
					       0, req_size, HWQ_TYPE_CTX);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	resp = (struct creq_create_qp_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
	if (!resp) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed");
		rc = -EINVAL;
		goto fail;
	}
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out");
		rc = -ETIMEDOUT;
		goto fail;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed ");
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		rc = -EINVAL;
		goto fail;
	}
	qp->id = le32_to_cpu(resp->xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	sq->flush_in_progress = false;
	rq->flush_in_progress = false;

	return 0;

fail:
	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
fail_orrq:
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
fail_buf_free:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
	kfree(rq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
	kfree(sq->swq);
exit:
	return rc;
}
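
/* For RC QPs the SQ ring is allocated with psn_sz bytes of PSN-search
 * metadata per WQE appended after the last SQE.  A sketch of the layout
 * inside the single hwq allocation:
 *
 *	[ SQE 0 | SQE 1 | ... | SQE n-1 | PSN 0 | PSN 1 | ... | PSN n-1 ]
 *
 * When the PSN area does not begin on a page boundary, poff (computed
 * above in bnxt_qplib_create_qp) re-bases the per-WQE psn_search
 * pointers so each swq entry maps to its own search slot.
 */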

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}
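
/* A minimal sketch of driving a QP to RTS with bnxt_qplib_modify_qp(),
 * assuming the QP was just created (cur_qp_state == RESET) and the
 * caller has filled the ah/psn attributes and the relevant
 * CMDQ_MODIFY_QP_MODIFY_MASK_* bits for each step:
 *
 *	qp->state = CMDQ_MODIFY_QP_NEW_STATE_INIT;
 *	rc = bnxt_qplib_modify_qp(res, qp);
 *	qp->state = CMDQ_MODIFY_QP_NEW_STATE_RTR;
 *	rc = bnxt_qplib_modify_qp(res, qp);
 *	qp->state = CMDQ_MODIFY_QP_NEW_STATE_RTS;
 *	rc = bnxt_qplib_modify_qp(res, qp);
 *
 * __filter_modify_flags() above trims the caller's mask to what the
 * FW accepts for each cur_qp_state -> state transition.
 */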

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_modify_qp req;
	struct creq_modify_qp_resp *resp;
	u16 cmd_flags = 0, pkey;
	u32 temp32[4];
	u32 bmask;

	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
					 qp->pkey_index, &pkey))
			req.pkey = cpu_to_le16(pkey);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu = qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	resp = (struct creq_modify_qp_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
	if (!resp) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed");
		return -EINVAL;
	}
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out");
		return -ETIMEDOUT;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed ");
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		return -EINVAL;
	}
	qp->cur_qp_state = qp->state;
	return 0;
}
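
/* QUERY_QP returns its attributes in a side buffer rather than in the
 * CREQ completion itself; req.resp_size below is expressed in
 * BNXT_QPLIB_CMDQE_UNITS so the FW knows how much side-buffer space
 * to fill.
 */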

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_qp req;
	struct creq_query_qp_resp *resp;
	struct creq_query_qp_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp32[4];
	int i;

	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	resp = (struct creq_query_qp_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     (void **)&sb, 0);
	if (!resp) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed");
		return -EINVAL;
	}
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out");
		return -ETIMEDOUT;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed ");
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		return -EINVAL;
	}
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
				  true : false;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
	return 0;
}
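
/* On QP destroy, any CQEs still sitting in the CQ ring would carry a
 * stale qp_handle.  __clean_cq() walks the ring and zeroes the handle
 * in every valid CQE that points at the dying QP, so later polling
 * silently skips them instead of dereferencing freed memory.
 */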

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
		hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp *resp;
	unsigned long flags;
	u16 cmd_flags = 0;

	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	resp = (struct creq_destroy_qp_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
	if (!resp) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed");
		return -EINVAL;
	}
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out");
		return -ETIMEDOUT;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed ");
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		return -EINVAL;
	}

	/* Must walk the associated CQs to nullify the QP ptr */
	spin_lock_irqsave(&qp->scq->hwq.lock, flags);

	__clean_cq(qp->scq, (u64)(unsigned long)qp);

	if (qp->rcq && qp->rcq != qp->scq) {
		spin_lock(&qp->rcq->hwq.lock);
		__clean_cq(qp->rcq, (u64)(unsigned long)qp);
		spin_unlock(&qp->rcq->hwq.lock);
	}

	spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);

	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);

	return 0;
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return HWQ_CMP(rq->hwq.prod, &rq->hwq);
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}
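
/* A minimal post sketch, assuming the caller filled a stack-allocated
 * struct bnxt_qplib_swqe (wr_id, type, flags, sg_list/num_sge):
 *
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);
 *
 * Several WQEs may be posted back to back with a single doorbell ring
 * at the end, since the doorbell only publishes the current producer
 * index.
 */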

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_prod;

	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);

	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_SQ);
	/* Flush all the WQE writes to HW */
	wmb();
	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}

int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_swq *swq;
	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
	struct sq_sge *hw_sge;
	u32 sw_prod;
	u8 wqe_size16;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	__le32 temp32;

	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
		rc = -EINVAL;
		goto done;
	}
	if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) ==
	    HWQ_CMP(sq->hwq.cons, &sq->hwq)) {
		rc = -ENOMEM;
		goto done;
	}
	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
	swq = &sq->swq[sw_prod];
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
	swq->start_psn = sq->psn & BTH_PSN_MASK;

	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
					[get_sqe_idx(sw_prod)];

	memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		/* Copy the inline data */
		if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
			dev_warn(&sq->hwq.pdev->dev,
				 "QPLIB: Inline data length > 96 detected");
			data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
		} else {
			data_len = wqe->inline_len;
		}
		memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
		wqe_size16 = (data_len + 15) >> 4;
	} else {
		for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
		     i < wqe->num_sge; i++, hw_sge++) {
			hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
			hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
			hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
			data_len += wqe->sg_list[i].size;
		}
		/* Each SGE entry = 1 WQE size16 */
		wqe_size16 = wqe->num_sge;
	}

	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			/* Assemble info for Raw Ethertype QPs */
			struct sq_send_raweth_qp1 *sqe =
				(struct sq_send_raweth_qp1 *)hw_sq_send_hdr;

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		/* else, just fall thru */
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->inv_key_or_imm_data = cpu_to_le32(
						wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->dst_qp = cpu_to_le32(
					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
			sqe->length = cpu_to_le32(data_len);
			sqe->avid = cpu_to_le32(wqe->send.avid &
						SQ_SEND_AVID_MASK);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
		} else {
			sqe->length = cpu_to_le32(data_len);
			sqe->dst_qp = 0;
			sqe->avid = 0;
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe =
			(struct sq_localinvalidate *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
					SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		sqe->va = cpu_to_le64(wqe->frmr.va);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		sqe->va = cpu_to_le64(wqe->bind.va);
		temp32 = cpu_to_le32(wqe->bind.length);
		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	if (swq->psn_search) {
		swq->psn_search->opcode_start_psn = cpu_to_le32(
			((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
			 SQ_PSN_SEARCH_START_PSN_MASK) |
			((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
			 SQ_PSN_SEARCH_OPCODE_MASK));
		swq->psn_search->flags_next_psn = cpu_to_le32(
			((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
			 SQ_PSN_SEARCH_NEXT_PSN_MASK));
	}

	sq->hwq.prod++;
done:
	return rc;
}
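
/* The RQ completion path reports a wr_id *index* rather than the wr_id
 * itself: bnxt_qplib_post_recv() below stashes the software producer
 * index in rqe->wr_id[0], and the CQE handlers use that index to look
 * up rq->swq[] and recover the caller's 64-bit wr_id.
 */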

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_prod;

	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_RQ);

	/* Flush the writes to the HW Rx WQE before ringing the Rx DB */
	wmb();
	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}

int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe *rqe, **rqe_ptr;
	struct sq_sge *hw_sge;
	u32 sw_prod;
	int i, rc = 0;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&rq->hwq.pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}
	if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) ==
	    HWQ_CMP(rq->hwq.cons, &rq->hwq)) {
		dev_err(&rq->hwq.pdev->dev,
			"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
		rc = -EINVAL;
		goto done;
	}
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	rq->swq[sw_prod].wr_id = wqe->wr_id;

	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
	rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];

	memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);

	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	rqe->wqe_type = wqe->type;
	rqe->flags = wqe->flags;
	rqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*rqe), data) + 15) >> 4);

	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
	rqe->wr_id[0] = cpu_to_le32(sw_prod);

	rq->hwq.prod++;
done:
	return rc;
}
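
/* CQ doorbells come in two flavors: bnxt_qplib_arm_cq_enable() writes
 * the ARMENA doorbell that enables event generation for the CQ, while
 * bnxt_qplib_arm_cq() re-arms an enabled CQ with the caller-supplied
 * arm_type (per the HSI, typically DBR_DBR_TYPE_CQ_ARMALL or
 * DBR_DBR_TYPE_CQ_ARMSE for solicited-only events).
 */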

/* CQ */

/* Spinlock must be held */
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
{
	struct dbr_dbr db_msg = { 0 };

	db_msg.type_xid =
		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_CQ_ARMENA);
	/* Flush memory writes before enabling the CQ */
	wmb();
	__iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
}

static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_cons;

	/* Ring DB */
	sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
	db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    arm_type);
	/* flush memory writes before arming the CQ */
	wmb();
	__iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}

int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_cq req;
	struct creq_create_cq_resp *resp;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	int rc;

	cq->hwq.max_elements = cq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
				       cq->nmap, &cq->hwq.max_elements,
				       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: FP: CREATE_CQ failed due to NULL DPI");
		rc = -EINVAL;
		goto fail;
	}
	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);

	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	req.pg_size_lvl = cpu_to_le32(
	    ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
						CMDQ_CREATE_CQ_LVL_SFT) |
	    (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
	     pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
	     pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
	     pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
	     pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
	     pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
	     CMDQ_CREATE_CQ_PG_SIZE_PG_4K));

	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);

	resp = (struct creq_create_cq_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
	if (!resp) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed");
		rc = -EINVAL;
		goto fail;
	}
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out");
		rc = -ETIMEDOUT;
		goto fail;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed ");
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		rc = -EINVAL;
		goto fail;
	}
	cq->id = le32_to_cpu(resp->xid);
	cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);

	bnxt_qplib_arm_cq_enable(cq);
	return 0;

fail:
	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
exit:
	return rc;
}

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_cq req;
	struct creq_destroy_cq_resp *resp;
	u16 cmd_flags = 0;

	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);

	req.cq_cid = cpu_to_le32(cq->id);
	resp = (struct creq_destroy_cq_resp *)
			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						     NULL, 0);
	if (!resp) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed");
		return -EINVAL;
	}
	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
		/* Cmd timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out");
		return -ETIMEDOUT;
	}
	if (resp->status ||
	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed ");
		dev_err(&rcfw->pdev->dev,
			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
			resp->status, le16_to_cpu(req.cookie),
			le16_to_cpu(resp->cookie));
		return -EINVAL;
	}
	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
	return 0;
}
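
/* When a QP drops into the error state, every WQE still outstanding on
 * its SQ or RQ must be completed with FLUSHED_ERR status.  __flush_sq()
 * and __flush_rq() below fabricate those CQEs in software, consuming
 * ring entries from cons up to prod within the polling budget.
 */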

static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	u32 sw_prod, sw_cons;
	struct bnxt_qplib_cqe *cqe;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == sw_prod) {
			sq->flush_in_progress = false;
			break;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[sw_cons].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[sw_cons].type;
		cqe++;
		(*budget)--;
		sq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      int opcode, struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 sw_prod, sw_cons;
	int rc = 0;

	/* Flush the rest of the RQ */
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
		if (sw_cons == sw_prod)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status =
		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[sw_cons].wr_id;
		cqe++;
		(*budget)--;
		rq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}
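
/* REQ completions can be aggregated: a single CQE may acknowledge a
 * whole run of SQ WQEs ending at sq_cons_idx.  The handler below walks
 * the software queue from the current consumer to that index and emits
 * a completion for each signaled entry it passes over.
 */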

static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_cqe *cqe;
	u32 sw_cons, cqe_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: Process Req qp is NULL");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
	if (cqe_cons > sq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process req reported ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
			cqe_cons, sq->hwq.max_elements);
		return -EINVAL;
	}
	/* If we were in the middle of flushing the SQ, continue */
	if (sq->flush_in_progress)
		goto flush;

	/* Need to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation from the current sq cons
	 * to the cqe_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == cqe_cons)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = sq->swq[sw_cons].wr_id;
		cqe->type = sq->swq[sw_cons].type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: FP: CQ Processed Req ");
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
				sw_cons, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			sq->flush_in_progress = true;
			/* Must block new posting of SQ and RQ */
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
		} else {
			if (sq->swq[sw_cons].flags &
			    SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
		sq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	if (!sq->flush_in_progress)
		goto done;
flush:
	/* Need to walk the sq's swq to fabricate CQEs for all
	 * previously posted SWQEs due to the error CQE received
	 */
	rc = __flush_sq(sq, qp, pcqe, budget);
	if (!rc)
		sq->flush_in_progress = false;
done:
	return rc;
}

static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
		return -EINVAL;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
			CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	rq = &qp->rq;
	if (wr_id_idx > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
			wr_id_idx, rq->hwq.max_elements);
		return -EINVAL;
	}
	if (rq->flush_in_progress)
		goto flush_rq;

	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
	cqe++;
	(*budget)--;
	rq->hwq.cons++;
	*pcqe = cqe;

	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
		rq->flush_in_progress = true;
flush_rq:
		rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_RC, pcqe, budget);
		if (!rc)
			rq->flush_in_progress = false;
	}
	return rc;
}

static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
		return -EINVAL;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	memcpy(cqe->smac, hwcqe->src_mac, 6);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
				  ((le32_to_cpu(
				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	rq = &qp->rq;
	if (wr_id_idx > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: wr_id idx %#x exceeded RQ max %#x",
			wr_id_idx, rq->hwq.max_elements);
		return -EINVAL;
	}
	if (rq->flush_in_progress)
		goto flush_rq;

	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
	cqe++;
	(*budget)--;
	rq->hwq.cons++;
	*pcqe = cqe;

	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
		rq->flush_in_progress = true;
flush_rq:
		rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_UD, pcqe, budget);
		if (!rc)
			rq->flush_in_progress = false;
	}
	return rc;
}
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
		return -EINVAL;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	memcpy(cqe->smac, hwcqe->src_mac, 6);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id) &
		    CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
		      ((le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id) &
			CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	rq = &qp->rq;
	if (wr_id_idx >= rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process UD wr_id idx %#x exceeded RQ max %#x",
			wr_id_idx, rq->hwq.max_elements);
		return -EINVAL;
	}
	if (rq->flush_in_progress)
		goto flush_rq;

	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
	cqe++;
	(*budget)--;
	rq->hwq.cons++;
	*pcqe = cqe;

	if (hwcqe->status != CQ_RES_UD_STATUS_OK) {
		rq->flush_in_progress = true;
flush_rq:
		rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_UD, pcqe, budget);
		if (!rc)
			rq->flush_in_progress = false;
	}
	return rc;
}

static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: process_cq Raw/QP1 qp is NULL");
		return -EINVAL;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
		& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);

	rq = &qp->rq;
	if (wr_id_idx >= rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x",
			wr_id_idx, rq->hwq.max_elements);
		return -EINVAL;
	}
	if (rq->flush_in_progress)
		goto flush_rq;

	cqe->wr_id = rq->swq[wr_id_idx].wr_id;
	cqe++;
	(*budget)--;
	rq->hwq.cons++;
	*pcqe = cqe;

	if (hwcqe->status != CQ_RES_RAWETH_QP1_STATUS_OK) {
		rq->flush_in_progress = true;
flush_rq:
		rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_RAWETH_QP1, pcqe,
				budget);
		if (!rc)
			rq->flush_in_progress = false;
	}
	return rc;
}
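
/* Terminal CQE handling: the hardware raises a terminal CQE once a QP has
 * been moved to the error state.  SQ work up to the reported sq_cons_idx
 * completes normally; everything still outstanding on the SQ and RQ must
 * then be flushed to the consumer by software.
 */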
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 sw_cons = 0, cqe_cons;
	int rc = 0;
	u8 opcode = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process terminal qp is NULL");
		return -EINVAL;
	}
	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;

	if (cqe_cons > sq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process terminal reported sq_cons_idx 0x%x which exceeded max 0x%x",
			cqe_cons, sq->hwq.max_elements);
		goto do_rq;
	}
	/* If we were in the middle of flushing, continue */
	if (sq->flush_in_progress)
		goto flush_sq;

	/* A terminal CQE can also aggregate prior successful CQEs,
	 * so all CQEs from the current sq's cons up to cqe_cons must
	 * be completed with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == cqe_cons)
			break;
		if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[sw_cons].wr_id;
			cqe->type = sq->swq[sw_cons].type;
			cqe++;
			(*budget)--;
		}
		sq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!(*budget) && sw_cons != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
	sq->flush_in_progress = true;
flush_sq:
	rc = __flush_sq(sq, qp, pcqe, budget);
	if (!rc)
		sq->flush_in_progress = false;
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process terminal reported rq_cons_idx 0x%x which exceeds max 0x%x",
			cqe_cons, rq->hwq.max_elements);
		goto done;
	}
	/* A terminal CQE requires all posted RQEs to complete with
	 * FLUSHED_ERR from the current rq->cons to rq->prod, regardless
	 * of the rq_cons the terminal CQE indicates
	 */
	rq->flush_in_progress = true;
	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	rc = __flush_rq(rq, qp, opcode, pcqe, budget);
	if (!rc)
		rq->flush_in_progress = false;
done:
	return rc;
}

static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	unsigned long flags;
	u32 sw_cons, raw_cons;
	int budget, rc = 0;

	spin_lock_irqsave(&cq->hwq.lock, flags);
	raw_cons = cq->hwq.cons;
	budget = num_cqes;

	while (budget) {
		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
		hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
		hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;

		/* Translate the device's CQE format into a bnxt_qplib_cqe */
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: process_cq unknown type 0x%x",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: process_cqe error rc = 0x%x", rc);
		}
		raw_cons++;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;
		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
	}
exit:
	spin_unlock_irqrestore(&cq->hwq.lock, flags);
	return num_cqes - budget;
}

void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->hwq.lock, flags);
	if (arm_type)
		bnxt_qplib_arm_cq(cq, arm_type);

	spin_unlock_irqrestore(&cq->hwq.lock, flags);
}
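
/*
 * Usage sketch (illustrative only, not part of the driver): a consumer such
 * as the bnxt_re verbs layer would typically drain the CQ in chunks sized
 * to its CQE scratch array and re-arm the CQ once the queue runs dry.  The
 * names cqe_buf and MAX_CQE_PER_POLL below are hypothetical:
 *
 *	struct bnxt_qplib_cqe cqe_buf[MAX_CQE_PER_POLL];
 *	int cnt;
 *
 *	do {
 *		cnt = bnxt_qplib_poll_cq(cq, cqe_buf, MAX_CQE_PER_POLL);
 *		(translate the cnt returned CQEs to ib_wc entries here)
 *	} while (cnt == MAX_CQE_PER_POLL);
 *	bnxt_qplib_req_notify_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
 */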