1 /* 2 * Broadcom NetXtreme-E RoCE driver. 3 * 4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term 5 * Broadcom refers to Broadcom Limited and/or its subsidiaries. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in 21 * the documentation and/or other materials provided with the 22 * distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 * 36 * Description: Main component of the bnxt_re driver 37 */ 38 39 #include <linux/module.h> 40 #include <linux/netdevice.h> 41 #include <linux/ethtool.h> 42 #include <linux/mutex.h> 43 #include <linux/list.h> 44 #include <linux/rculist.h> 45 #include <linux/spinlock.h> 46 #include <linux/pci.h> 47 #include <net/dcbnl.h> 48 #include <net/ipv6.h> 49 #include <net/addrconf.h> 50 #include <linux/if_ether.h> 51 #include <linux/auxiliary_bus.h> 52 53 #include <rdma/ib_verbs.h> 54 #include <rdma/ib_user_verbs.h> 55 #include <rdma/ib_umem.h> 56 #include <rdma/ib_addr.h> 57 #include <linux/hashtable.h> 58 59 #include "bnxt_ulp.h" 60 #include "roce_hsi.h" 61 #include "qplib_res.h" 62 #include "qplib_sp.h" 63 #include "qplib_fp.h" 64 #include "qplib_rcfw.h" 65 #include "bnxt_re.h" 66 #include "ib_verbs.h" 67 #include <rdma/bnxt_re-abi.h> 68 #include "bnxt.h" 69 #include "hw_counters.h" 70 #include "debugfs.h" 71 72 static char version[] = 73 BNXT_RE_DESC "\n"; 74 75 MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>"); 76 MODULE_DESCRIPTION(BNXT_RE_DESC); 77 MODULE_LICENSE("Dual BSD/GPL"); 78 79 /* globals */ 80 static DEFINE_MUTEX(bnxt_re_mutex); 81 82 static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev); 83 static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev); 84 85 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len, 86 u32 *offset); 87 static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp, 88 u8 port_num, enum ib_event_type event); 89 static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev) 90 { 91 struct bnxt_qplib_chip_ctx *cctx; 92 struct bnxt_en_dev *en_dev; 93 struct bnxt_qplib_res *res; 94 u32 l2db_len = 0; 95 u32 offset = 0; 96 u32 barlen; 97 int rc; 98 99 res = &rdev->qplib_res; 100 en_dev = rdev->en_dev; 101 cctx = rdev->chip_ctx; 102 103 /* Issue qcfg */ 104 rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset); 105 if (rc) 106 dev_info(rdev_to_dev(rdev), 107 "Couldn't get DB bar size, Low latency framework is disabled\n"); 108 /* set register offsets for both UC and WC */ 109 if (bnxt_qplib_is_chip_gen_p7(cctx)) { 110 res->dpi_tbl.ucreg.offset = offset; 111 res->dpi_tbl.wcreg.offset = en_dev->l2_db_size; 112 } else { 113 res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET : 114 BNXT_QPLIB_DBR_PF_DB_OFFSET; 115 res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset; 116 } 117 118 /* If WC mapping is disabled by L2 driver then en_dev->l2_db_size 119 * is equal to the DB-Bar actual size. This indicates that L2 120 * is mapping entire bar as UC-. RoCE driver can't enable WC mapping 121 * in such cases and DB-push will be disabled. 122 */ 123 barlen = pci_resource_len(res->pdev, RCFW_DBR_PCI_BAR_REGION); 124 if (cctx->modes.db_push && l2db_len && en_dev->l2_db_size != barlen) { 125 res->dpi_tbl.wcreg.offset = en_dev->l2_db_size; 126 dev_info(rdev_to_dev(rdev), "Low latency framework is enabled\n"); 127 } 128 } 129 130 static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev) 131 { 132 struct bnxt_qplib_chip_ctx *cctx; 133 134 cctx = rdev->chip_ctx; 135 cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? 136 BNXT_QPLIB_WQE_MODE_VARIABLE : BNXT_QPLIB_WQE_MODE_STATIC; 137 if (bnxt_re_hwrm_qcaps(rdev)) 138 dev_err(rdev_to_dev(rdev), 139 "Failed to query hwrm qcaps\n"); 140 if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx)) { 141 cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT; 142 cctx->modes.toggle_bits |= BNXT_QPLIB_SRQ_TOGGLE_BIT; 143 } 144 } 145 146 static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) 147 { 148 struct bnxt_qplib_chip_ctx *chip_ctx; 149 150 if (!rdev->chip_ctx) 151 return; 152 153 kfree(rdev->dev_attr); 154 rdev->dev_attr = NULL; 155 156 chip_ctx = rdev->chip_ctx; 157 rdev->chip_ctx = NULL; 158 rdev->rcfw.res = NULL; 159 rdev->qplib_res.cctx = NULL; 160 rdev->qplib_res.pdev = NULL; 161 rdev->qplib_res.netdev = NULL; 162 kfree(chip_ctx); 163 } 164 165 static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev) 166 { 167 struct bnxt_qplib_chip_ctx *chip_ctx; 168 struct bnxt_en_dev *en_dev; 169 int rc = -ENOMEM; 170 171 en_dev = rdev->en_dev; 172 173 rdev->qplib_res.pdev = en_dev->pdev; 174 chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL); 175 if (!chip_ctx) 176 return -ENOMEM; 177 chip_ctx->chip_num = en_dev->chip_num; 178 chip_ctx->hw_stats_size = en_dev->hw_ring_stats_size; 179 180 rdev->chip_ctx = chip_ctx; 181 /* rest members to follow eventually */ 182 183 rdev->qplib_res.cctx = rdev->chip_ctx; 184 rdev->rcfw.res = &rdev->qplib_res; 185 rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL); 186 if (!rdev->dev_attr) 187 goto free_chip_ctx; 188 rdev->qplib_res.dattr = rdev->dev_attr; 189 rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev); 190 rdev->qplib_res.en_dev = en_dev; 191 192 rc = bnxt_re_query_hwrm_intf_version(rdev); 193 if (rc) 194 goto free_dev_attr; 195 196 bnxt_re_set_drv_mode(rdev); 197 198 bnxt_re_set_db_offset(rdev); 199 rc = bnxt_qplib_map_db_bar(&rdev->qplib_res); 200 if (rc) 201 goto free_dev_attr; 202 203 if (bnxt_qplib_determine_atomics(en_dev->pdev)) 204 ibdev_info(&rdev->ibdev, 205 "platform doesn't support global atomics."); 206 return 0; 207 free_dev_attr: 208 kfree(rdev->dev_attr); 209 rdev->dev_attr = NULL; 210 free_chip_ctx: 211 kfree(rdev->chip_ctx); 212 rdev->chip_ctx = NULL; 213 return rc; 214 } 215 216 /* SR-IOV helper functions */ 217 218 static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev) 219 { 220 if (BNXT_EN_VF(rdev->en_dev)) 221 rdev->is_virtfn = 1; 222 } 223 224 /* Set the maximum number of each resource that the driver actually wants 225 * to allocate. This may be up to the maximum number the firmware has 226 * reserved for the function. The driver may choose to allocate fewer 227 * resources than the firmware maximum. 228 */ 229 static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev) 230 { 231 struct bnxt_qplib_dev_attr *attr; 232 struct bnxt_qplib_ctx *ctx; 233 int i; 234 235 attr = rdev->dev_attr; 236 ctx = &rdev->qplib_ctx; 237 238 ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT, 239 attr->max_qp); 240 ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K; 241 /* Use max_mr from fw since max_mrw does not get set */ 242 ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr); 243 ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT, 244 attr->max_srq); 245 ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq); 246 if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) 247 for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) 248 rdev->qplib_ctx.tqm_ctx.qcount[i] = 249 rdev->dev_attr->tqm_alloc_reqs[i]; 250 } 251 252 static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf) 253 { 254 struct bnxt_qplib_vf_res *vf_res; 255 u32 mrws = 0; 256 u32 vf_pct; 257 u32 nvfs; 258 259 vf_res = &qplib_ctx->vf_res; 260 /* 261 * Reserve a set of resources for the PF. Divide the remaining 262 * resources among the VFs 263 */ 264 vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF; 265 nvfs = num_vf; 266 num_vf = 100 * num_vf; 267 vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf; 268 vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf; 269 vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf; 270 /* 271 * The driver allows many more MRs than other resources. If the 272 * firmware does also, then reserve a fixed amount for the PF and 273 * divide the rest among VFs. VFs may use many MRs for NFS 274 * mounts, ISER, NVME applications, etc. If the firmware severely 275 * restricts the number of MRs, then let PF have half and divide 276 * the rest among VFs, as for the other resource types. 277 */ 278 if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) { 279 mrws = qplib_ctx->mrw_count * vf_pct; 280 nvfs = num_vf; 281 } else { 282 mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF; 283 } 284 vf_res->max_mrw_per_vf = (mrws / nvfs); 285 vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF; 286 } 287 288 static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev) 289 { 290 u32 num_vfs; 291 292 memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res)); 293 bnxt_re_limit_pf_res(rdev); 294 295 num_vfs = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ? 296 BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs; 297 if (num_vfs) 298 bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs); 299 } 300 301 static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev) 302 { 303 /* 304 * Use the total VF count since the actual VF count may not be 305 * available at this point. 306 */ 307 rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev); 308 if (!rdev->num_vfs) 309 return; 310 311 bnxt_re_set_resource_limits(rdev); 312 bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw, 313 &rdev->qplib_ctx); 314 } 315 316 struct bnxt_re_dcb_work { 317 struct work_struct work; 318 struct bnxt_re_dev *rdev; 319 struct hwrm_async_event_cmpl cmpl; 320 }; 321 322 static bool bnxt_re_is_qp1_qp(struct bnxt_re_qp *qp) 323 { 324 return qp->ib_qp.qp_type == IB_QPT_GSI; 325 } 326 327 static struct bnxt_re_qp *bnxt_re_get_qp1_qp(struct bnxt_re_dev *rdev) 328 { 329 struct bnxt_re_qp *qp; 330 331 mutex_lock(&rdev->qp_lock); 332 list_for_each_entry(qp, &rdev->qp_list, list) { 333 if (bnxt_re_is_qp1_qp(qp)) { 334 mutex_unlock(&rdev->qp_lock); 335 return qp; 336 } 337 } 338 mutex_unlock(&rdev->qp_lock); 339 return NULL; 340 } 341 342 static int bnxt_re_update_qp1_tos_dscp(struct bnxt_re_dev *rdev) 343 { 344 struct bnxt_re_qp *qp; 345 346 if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) 347 return 0; 348 349 qp = bnxt_re_get_qp1_qp(rdev); 350 if (!qp) 351 return 0; 352 353 qp->qplib_qp.modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP; 354 qp->qplib_qp.tos_dscp = rdev->cc_param.qp1_tos_dscp; 355 356 return bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); 357 } 358 359 static void bnxt_re_init_dcb_wq(struct bnxt_re_dev *rdev) 360 { 361 rdev->dcb_wq = create_singlethread_workqueue("bnxt_re_dcb_wq"); 362 } 363 364 static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev) 365 { 366 if (!rdev->dcb_wq) 367 return; 368 destroy_workqueue(rdev->dcb_wq); 369 } 370 371 static void bnxt_re_dcb_wq_task(struct work_struct *work) 372 { 373 struct bnxt_re_dcb_work *dcb_work = 374 container_of(work, struct bnxt_re_dcb_work, work); 375 struct bnxt_re_dev *rdev = dcb_work->rdev; 376 struct bnxt_qplib_cc_param *cc_param; 377 int rc; 378 379 if (!rdev) 380 goto free_dcb; 381 382 cc_param = &rdev->cc_param; 383 rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, cc_param); 384 if (rc) { 385 ibdev_dbg(&rdev->ibdev, "Failed to query ccparam rc:%d", rc); 386 goto free_dcb; 387 } 388 if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) { 389 cc_param->qp1_tos_dscp = cc_param->tos_dscp; 390 rc = bnxt_re_update_qp1_tos_dscp(rdev); 391 if (rc) { 392 ibdev_dbg(&rdev->ibdev, "%s: Failed to modify QP1 rc:%d", 393 __func__, rc); 394 goto free_dcb; 395 } 396 } 397 398 free_dcb: 399 kfree(dcb_work); 400 } 401 402 static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl) 403 { 404 struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle); 405 struct bnxt_re_dcb_work *dcb_work; 406 struct bnxt_re_dev *rdev; 407 u32 data1, data2; 408 u16 event_id; 409 410 rdev = en_info->rdev; 411 if (!rdev) 412 return; 413 414 event_id = le16_to_cpu(cmpl->event_id); 415 data1 = le32_to_cpu(cmpl->event_data1); 416 data2 = le32_to_cpu(cmpl->event_data2); 417 418 ibdev_dbg(&rdev->ibdev, "Async event_id = %d data1 = %d data2 = %d", 419 event_id, data1, data2); 420 421 switch (event_id) { 422 case ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE: 423 dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC); 424 if (!dcb_work) 425 break; 426 427 dcb_work->rdev = rdev; 428 memcpy(&dcb_work->cmpl, cmpl, sizeof(*cmpl)); 429 INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task); 430 queue_work(rdev->dcb_wq, &dcb_work->work); 431 break; 432 default: 433 break; 434 } 435 } 436 437 static void bnxt_re_stop_irq(void *handle, bool reset) 438 { 439 struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle); 440 struct bnxt_qplib_rcfw *rcfw; 441 struct bnxt_re_dev *rdev; 442 struct bnxt_qplib_nq *nq; 443 int indx; 444 445 rdev = en_info->rdev; 446 if (!rdev) 447 return; 448 rcfw = &rdev->rcfw; 449 450 if (reset) { 451 set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags); 452 set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); 453 wake_up_all(&rdev->rcfw.cmdq.waitq); 454 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, 455 IB_EVENT_DEVICE_FATAL); 456 } 457 458 for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) { 459 nq = &rdev->nqr->nq[indx - 1]; 460 bnxt_qplib_nq_stop_irq(nq, false); 461 } 462 463 bnxt_qplib_rcfw_stop_irq(rcfw, false); 464 } 465 466 static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) 467 { 468 struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle); 469 struct bnxt_msix_entry *msix_ent; 470 struct bnxt_qplib_rcfw *rcfw; 471 struct bnxt_re_dev *rdev; 472 struct bnxt_qplib_nq *nq; 473 int indx, rc; 474 475 rdev = en_info->rdev; 476 if (!rdev) 477 return; 478 msix_ent = rdev->nqr->msix_entries; 479 rcfw = &rdev->rcfw; 480 if (!ent) { 481 /* Not setting the f/w timeout bit in rcfw. 482 * During the driver unload the first command 483 * to f/w will timeout and that will set the 484 * timeout bit. 485 */ 486 ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n"); 487 return; 488 } 489 490 /* Vectors may change after restart, so update with new vectors 491 * in device sctructure. 492 */ 493 for (indx = 0; indx < rdev->nqr->num_msix; indx++) 494 rdev->nqr->msix_entries[indx].vector = ent[indx].vector; 495 496 rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector, 497 false); 498 if (rc) { 499 ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n"); 500 return; 501 } 502 for (indx = BNXT_RE_NQ_IDX ; indx < rdev->nqr->num_msix; indx++) { 503 nq = &rdev->nqr->nq[indx - 1]; 504 rc = bnxt_qplib_nq_start_irq(nq, indx - 1, 505 msix_ent[indx].vector, false); 506 if (rc) { 507 ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n", 508 indx - 1); 509 return; 510 } 511 } 512 } 513 514 static struct bnxt_ulp_ops bnxt_re_ulp_ops = { 515 .ulp_async_notifier = bnxt_re_async_notifier, 516 .ulp_irq_stop = bnxt_re_stop_irq, 517 .ulp_irq_restart = bnxt_re_start_irq 518 }; 519 520 /* RoCE -> Net driver */ 521 522 static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev) 523 { 524 struct bnxt_en_dev *en_dev; 525 526 en_dev = rdev->en_dev; 527 return bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev->adev); 528 } 529 530 static void bnxt_re_init_hwrm_hdr(struct input *hdr, u16 opcd) 531 { 532 hdr->req_type = cpu_to_le16(opcd); 533 hdr->cmpl_ring = cpu_to_le16(-1); 534 hdr->target_id = cpu_to_le16(-1); 535 } 536 537 static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg, 538 int msg_len, void *resp, int resp_max_len, 539 int timeout) 540 { 541 fw_msg->msg = msg; 542 fw_msg->msg_len = msg_len; 543 fw_msg->resp = resp; 544 fw_msg->resp_max_len = resp_max_len; 545 fw_msg->timeout = timeout; 546 } 547 548 void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev) 549 { 550 struct bnxt_en_dev *en_dev = rdev->en_dev; 551 struct hwrm_vnic_free_input req = {}; 552 struct bnxt_fw_msg fw_msg = {}; 553 int rc; 554 555 bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_FREE); 556 557 req.vnic_id = cpu_to_le32(rdev->mirror_vnic_id); 558 bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL, 559 0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); 560 rc = bnxt_send_msg(en_dev, &fw_msg); 561 if (rc) 562 ibdev_dbg(&rdev->ibdev, 563 "Failed to free vnic, rc = %d\n", rc); 564 } 565 566 int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev) 567 { 568 struct bnxt_en_dev *en_dev = rdev->en_dev; 569 struct hwrm_vnic_alloc_output resp = {}; 570 struct hwrm_vnic_alloc_input req = {}; 571 struct bnxt_fw_msg fw_msg = {}; 572 int rc; 573 574 bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_ALLOC); 575 576 req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id); 577 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID); 578 bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, 579 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); 580 rc = bnxt_send_msg(en_dev, &fw_msg); 581 if (rc) 582 ibdev_dbg(&rdev->ibdev, 583 "Failed to alloc vnic, rc = %d\n", rc); 584 585 return rc; 586 } 587 588 int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id) 589 { 590 struct bnxt_en_dev *en_dev = rdev->en_dev; 591 struct hwrm_vnic_cfg_input req = {}; 592 struct bnxt_fw_msg fw_msg = {}; 593 int rc; 594 595 bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_CFG); 596 597 req.flags = cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE); 598 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_RAW_QP_ID | 599 VNIC_CFG_REQ_ENABLES_MRU); 600 req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id); 601 req.raw_qp_id = cpu_to_le32(qp_id); 602 req.mru = cpu_to_le16(rdev->netdev->mtu + VLAN_ETH_HLEN); 603 604 bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL, 605 0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); 606 rc = bnxt_send_msg(en_dev, &fw_msg); 607 if (rc) 608 ibdev_dbg(&rdev->ibdev, 609 "Failed to cfg vnic, rc = %d\n", rc); 610 611 return rc; 612 } 613 614 /* Query device config using common hwrm */ 615 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len, 616 u32 *offset) 617 { 618 struct bnxt_en_dev *en_dev = rdev->en_dev; 619 struct hwrm_func_qcfg_output resp = {0}; 620 struct hwrm_func_qcfg_input req = {0}; 621 struct bnxt_fw_msg fw_msg = {}; 622 int rc; 623 624 bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG); 625 req.fid = cpu_to_le16(0xffff); 626 bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, 627 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); 628 rc = bnxt_send_msg(en_dev, &fw_msg); 629 if (!rc) { 630 *db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024); 631 *offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024); 632 rdev->mirror_vnic_id = le16_to_cpu(resp.mirror_vnic_id); 633 } 634 return rc; 635 } 636 637 /* Query function capabilities using common hwrm */ 638 int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev) 639 { 640 struct bnxt_en_dev *en_dev = rdev->en_dev; 641 struct hwrm_func_qcaps_output resp = {}; 642 struct hwrm_func_qcaps_input req = {}; 643 struct bnxt_qplib_chip_ctx *cctx; 644 struct bnxt_fw_msg fw_msg = {}; 645 u32 flags_ext2; 646 int rc; 647 648 cctx = rdev->chip_ctx; 649 bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS); 650 req.fid = cpu_to_le16(0xffff); 651 bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, 652 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); 653 654 rc = bnxt_send_msg(en_dev, &fw_msg); 655 if (rc) 656 return rc; 657 cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE; 658 659 flags_ext2 = le32_to_cpu(resp.flags_ext2); 660 cctx->modes.dbr_pacing = flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED || 661 flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED; 662 cctx->modes.roce_mirror = !!(le32_to_cpu(resp.flags_ext3) & 663 FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED); 664 return 0; 665 } 666 667 static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev) 668 { 669 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; 670 struct hwrm_func_dbr_pacing_qcfg_output resp = {}; 671 struct hwrm_func_dbr_pacing_qcfg_input req = {}; 672 struct bnxt_en_dev *en_dev = rdev->en_dev; 673 struct bnxt_qplib_chip_ctx *cctx; 674 struct bnxt_fw_msg fw_msg = {}; 675 int rc; 676 677 cctx = rdev->chip_ctx; 678 bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG); 679 bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, 680 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); 681 rc = bnxt_send_msg(en_dev, &fw_msg); 682 if (rc) 683 return rc; 684 685 if ((le32_to_cpu(resp.dbr_stat_db_fifo_reg) & 686 FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK) == 687 FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC) 688 cctx->dbr_stat_db_fifo = 689 le32_to_cpu(resp.dbr_stat_db_fifo_reg) & 690 ~FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK; 691 692 pacing_data->fifo_max_depth = le32_to_cpu(resp.dbr_stat_db_max_fifo_depth); 693 if (!pacing_data->fifo_max_depth) 694 pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH(cctx); 695 pacing_data->fifo_room_mask = le32_to_cpu(resp.dbr_stat_db_fifo_reg_fifo_room_mask); 696 pacing_data->fifo_room_shift = resp.dbr_stat_db_fifo_reg_fifo_room_shift; 697 698 return 0; 699 } 700 701 /* Update the pacing tunable parameters to the default values */ 702 static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev) 703 { 704 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; 705 706 pacing_data->do_pacing = rdev->pacing.dbr_def_do_pacing; 707 pacing_data->pacing_th = rdev->pacing.pacing_algo_th; 708 pacing_data->alarm_th = 709 pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE; 710 } 711 712 static u32 __get_fifo_occupancy(struct bnxt_re_dev *rdev) 713 { 714 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; 715 u32 read_val, fifo_occup; 716 717 read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off); 718 fifo_occup = pacing_data->fifo_max_depth - 719 ((read_val & pacing_data->fifo_room_mask) >> 720 pacing_data->fifo_room_shift); 721 return fifo_occup; 722 } 723 724 static bool is_dbr_fifo_full(struct bnxt_re_dev *rdev) 725 { 726 u32 max_occup, fifo_occup; 727 728 fifo_occup = __get_fifo_occupancy(rdev); 729 max_occup = BNXT_RE_MAX_FIFO_DEPTH(rdev->chip_ctx) - 1; 730 if (fifo_occup == max_occup) 731 return true; 732 733 return false; 734 } 735 736 static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev) 737 { 738 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data; 739 u32 retry_fifo_check = 1000; 740 u32 fifo_occup; 741 742 /* loop shouldn't run infintely as the occupancy usually goes 743 * below pacing algo threshold as soon as pacing kicks in. 744 */ 745 while (1) { 746 fifo_occup = __get_fifo_occupancy(rdev); 747 /* Fifo occupancy cannot be greater the MAX FIFO depth */ 748 if (fifo_occup > pacing_data->fifo_max_depth) 749 break; 750 751 if (fifo_occup < pacing_data->pacing_th) 752 break; 753 if (!retry_fifo_check--) { 754 dev_info_once(rdev_to_dev(rdev), 755 "%s: fifo_occup = 0x%xfifo_max_depth = 0x%x pacing_th = 0x%x\n", 756 __func__, fifo_occup, pacing_data->fifo_max_depth, 757 pacing_data->pacing_th); 758 break; 759 } 760 761 } 762 } 763 764 static void bnxt_re_db_fifo_check(struct work_struct *work) 765 { 766 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev, 767 dbq_fifo_check_work); 768 struct bnxt_qplib_db_pacing_data *pacing_data; 769 u32 pacing_save; 770 771 if (!mutex_trylock(&rdev->pacing.dbq_lock)) 772 return; 773 pacing_data = rdev->qplib_res.pacing_data; 774 pacing_save = rdev->pacing.do_pacing_save; 775 __wait_for_fifo_occupancy_below_th(rdev); 776 cancel_delayed_work_sync(&rdev->dbq_pacing_work); 777 if (pacing_save > rdev->pacing.dbr_def_do_pacing) { 778 /* Double the do_pacing value during the congestion */ 779 pacing_save = pacing_save << 1; 780 } else { 781 /* 782 * when a new congestion is detected increase the do_pacing 783 * by 8 times. And also increase the pacing_th by 4 times. The 784 * reason to increase pacing_th is to give more space for the 785 * queue to oscillate down without getting empty, but also more 786 * room for the queue to increase without causing another alarm. 787 */ 788 pacing_save = pacing_save << 3; 789 pacing_data->pacing_th = rdev->pacing.pacing_algo_th * 4; 790 } 791 792 if (pacing_save > BNXT_RE_MAX_DBR_DO_PACING) 793 pacing_save = BNXT_RE_MAX_DBR_DO_PACING; 794 795 pacing_data->do_pacing = pacing_save; 796 rdev->pacing.do_pacing_save = pacing_data->do_pacing; 797 pacing_data->alarm_th = 798 pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE; 799 schedule_delayed_work(&rdev->dbq_pacing_work, 800 msecs_to_jiffies(rdev->pacing.dbq_pacing_time)); 801 rdev->stats.pacing.alerts++; 802 mutex_unlock(&rdev->pacing.dbq_lock); 803 } 804 805 static void bnxt_re_pacing_timer_exp(struct work_struct *work) 806 { 807 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev, 808 dbq_pacing_work.work); 809 struct bnxt_qplib_db_pacing_data *pacing_data; 810 u32 fifo_occup; 811 812 if (!mutex_trylock(&rdev->pacing.dbq_lock)) 813 return; 814 815 pacing_data = rdev->qplib_res.pacing_data; 816 fifo_occup = __get_fifo_occupancy(rdev); 817 818 if (fifo_occup > pacing_data->pacing_th) 819 goto restart_timer; 820 821 /* 822 * Instead of immediately going back to the default do_pacing 823 * reduce it by 1/8 times and restart the timer. 824 */ 825 pacing_data->do_pacing = pacing_data->do_pacing - (pacing_data->do_pacing >> 3); 826 pacing_data->do_pacing = max_t(u32, rdev->pacing.dbr_def_do_pacing, pacing_data->do_pacing); 827 if (pacing_data->do_pacing <= rdev->pacing.dbr_def_do_pacing) { 828 bnxt_re_set_default_pacing_data(rdev); 829 rdev->stats.pacing.complete++; 830 goto dbq_unlock; 831 } 832 833 restart_timer: 834 schedule_delayed_work(&rdev->dbq_pacing_work, 835 msecs_to_jiffies(rdev->pacing.dbq_pacing_time)); 836 rdev->stats.pacing.resched++; 837 dbq_unlock: 838 rdev->pacing.do_pacing_save = pacing_data->do_pacing; 839 mutex_unlock(&rdev->pacing.dbq_lock); 840 } 841 842 void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev) 843 { 844 struct bnxt_qplib_db_pacing_data *pacing_data; 845 846 if (!rdev->pacing.dbr_pacing) 847 return; 848 mutex_lock(&rdev->pacing.dbq_lock); 849 pacing_data = rdev->qplib_res.pacing_data; 850 851 /* 852 * Increase the alarm_th to max so that other user lib instances do not 853 * keep alerting the driver. 854 */ 855 pacing_data->alarm_th = pacing_data->fifo_max_depth; 856 pacing_data->do_pacing = BNXT_RE_MAX_DBR_DO_PACING; 857 cancel_work_sync(&rdev->dbq_fifo_check_work); 858 schedule_work(&rdev->dbq_fifo_check_work); 859 mutex_unlock(&rdev->pacing.dbq_lock); 860 } 861 862 static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev) 863 { 864 /* Allocate a page for app use */ 865 rdev->pacing.dbr_page = (void *)__get_free_page(GFP_KERNEL); 866 if (!rdev->pacing.dbr_page) 867 return -ENOMEM; 868 869 memset((u8 *)rdev->pacing.dbr_page, 0, PAGE_SIZE); 870 rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->pacing.dbr_page; 871 872 if (bnxt_re_hwrm_dbr_pacing_qcfg(rdev)) { 873 free_page((u64)rdev->pacing.dbr_page); 874 rdev->pacing.dbr_page = NULL; 875 return -EIO; 876 } 877 878 /* MAP HW window 2 for reading db fifo depth */ 879 writel(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK, 880 rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 881 rdev->pacing.dbr_db_fifo_reg_off = 882 (rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) + 883 BNXT_RE_GRC_FIFO_REG_BASE; 884 rdev->pacing.dbr_bar_addr = 885 pci_resource_start(rdev->qplib_res.pdev, 0) + rdev->pacing.dbr_db_fifo_reg_off; 886 887 if (is_dbr_fifo_full(rdev)) { 888 free_page((u64)rdev->pacing.dbr_page); 889 rdev->pacing.dbr_page = NULL; 890 return -EIO; 891 } 892 893 rdev->pacing.pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD; 894 rdev->pacing.dbq_pacing_time = BNXT_RE_DBR_PACING_TIME; 895 rdev->pacing.dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION; 896 rdev->pacing.do_pacing_save = rdev->pacing.dbr_def_do_pacing; 897 rdev->qplib_res.pacing_data->grc_reg_offset = rdev->pacing.dbr_db_fifo_reg_off; 898 bnxt_re_set_default_pacing_data(rdev); 899 /* Initialize worker for DBR Pacing */ 900 INIT_WORK(&rdev->dbq_fifo_check_work, bnxt_re_db_fifo_check); 901 INIT_DELAYED_WORK(&rdev->dbq_pacing_work, bnxt_re_pacing_timer_exp); 902 return 0; 903 } 904 905 static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev) 906 { 907 cancel_work_sync(&rdev->dbq_fifo_check_work); 908 cancel_delayed_work_sync(&rdev->dbq_pacing_work); 909 if (rdev->pacing.dbr_page) 910 free_page((u64)rdev->pacing.dbr_page); 911 912 rdev->pacing.dbr_page = NULL; 913 rdev->pacing.dbr_pacing = false; 914 } 915 916 static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, 917 u16 fw_ring_id, int type) 918 { 919 struct bnxt_en_dev *en_dev = rdev->en_dev; 920 struct hwrm_ring_free_input req = {}; 921 struct hwrm_ring_free_output resp; 922 struct bnxt_fw_msg fw_msg = {}; 923 int rc = -EINVAL; 924 925 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) 926 return 0; 927 928 bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE); 929 req.ring_type = type; 930 req.ring_id = cpu_to_le16(fw_ring_id); 931 bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, 932 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); 933 rc = bnxt_send_msg(en_dev, &fw_msg); 934 if (rc) 935 ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x", 936 req.ring_id, rc); 937 return rc; 938 } 939 940 static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, 941 struct bnxt_re_ring_attr *ring_attr, 942 u16 *fw_ring_id) 943 { 944 struct bnxt_en_dev *en_dev = rdev->en_dev; 945 struct hwrm_ring_alloc_input req = {}; 946 struct hwrm_ring_alloc_output resp; 947 struct bnxt_fw_msg fw_msg = {}; 948 int rc = -EINVAL; 949 950 bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC); 951 req.enables = 0; 952 req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]); 953 if (ring_attr->pages > 1) { 954 /* Page size is in log2 units */ 955 req.page_size = BNXT_PAGE_SHIFT; 956 req.page_tbl_depth = 1; 957 } 958 req.fbo = 0; 959 /* Association of ring index with doorbell index and MSIX number */ 960 req.logical_id = cpu_to_le16(ring_attr->lrid); 961 req.length = cpu_to_le32(ring_attr->depth + 1); 962 req.ring_type = ring_attr->type; 963 req.int_mode = ring_attr->mode; 964 bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, 965 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); 966 rc = bnxt_send_msg(en_dev, &fw_msg); 967 if (!rc) 968 *fw_ring_id = le16_to_cpu(resp.ring_id); 969 970 return rc; 971 } 972 973 static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, 974 u32 fw_stats_ctx_id) 975 { 976 struct bnxt_en_dev *en_dev = rdev->en_dev; 977 struct hwrm_stat_ctx_free_input req = {}; 978 struct hwrm_stat_ctx_free_output resp = {}; 979 struct bnxt_fw_msg fw_msg = {}; 980 int rc = -EINVAL; 981 982 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags)) 983 return 0; 984 985 bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE); 986 req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id); 987 bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, 988 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); 989 rc = bnxt_send_msg(en_dev, &fw_msg); 990 if (rc) 991 ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x", 992 rc); 993 994 return rc; 995 } 996 997 static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, 998 struct bnxt_qplib_stats *stats) 999 { 1000 struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx; 1001 struct hwrm_stat_ctx_alloc_output resp = {}; 1002 struct hwrm_stat_ctx_alloc_input req = {}; 1003 struct bnxt_en_dev *en_dev = rdev->en_dev; 1004 struct bnxt_fw_msg fw_msg = {}; 1005 int rc = -EINVAL; 1006 1007 stats->fw_id = INVALID_STATS_CTX_ID; 1008 1009 bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC); 1010 req.update_period_ms = cpu_to_le32(1000); 1011 req.stats_dma_addr = cpu_to_le64(stats->dma_map); 1012 req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size); 1013 req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE; 1014 bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, 1015 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev)); 1016 rc = bnxt_send_msg(en_dev, &fw_msg); 1017 if (!rc) 1018 stats->fw_id = le32_to_cpu(resp.stat_ctx_id); 1019 1020 return rc; 1021 } 1022 1023 static void bnxt_re_disassociate_ucontext(struct ib_ucontext *ibcontext) 1024 { 1025 } 1026 1027 /* Device */ 1028 static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, 1029 char *buf) 1030 { 1031 struct bnxt_re_dev *rdev = 1032 rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev); 1033 1034 return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->revision); 1035 } 1036 static DEVICE_ATTR_RO(hw_rev); 1037 1038 static ssize_t hca_type_show(struct device *device, 1039 struct device_attribute *attr, char *buf) 1040 { 1041 struct bnxt_re_dev *rdev = 1042 rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev); 1043 1044 return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->device); 1045 } 1046 static DEVICE_ATTR_RO(hca_type); 1047 1048 static ssize_t board_id_show(struct device *device, struct device_attribute *attr, 1049 char *buf) 1050 { 1051 struct bnxt_re_dev *rdev = rdma_device_to_drv_device(device, 1052 struct bnxt_re_dev, ibdev); 1053 char buffer[BNXT_VPD_FLD_LEN] = {}; 1054 1055 if (!rdev->is_virtfn) 1056 memcpy(buffer, rdev->board_partno, BNXT_VPD_FLD_LEN - 1); 1057 else 1058 scnprintf(buffer, BNXT_VPD_FLD_LEN, "0x%x-VF", 1059 rdev->en_dev->pdev->device); 1060 1061 return sysfs_emit(buf, "%s\n", buffer); 1062 } 1063 static DEVICE_ATTR_RO(board_id); 1064 1065 static struct attribute *bnxt_re_attributes[] = { 1066 &dev_attr_hw_rev.attr, 1067 &dev_attr_hca_type.attr, 1068 &dev_attr_board_id.attr, 1069 NULL 1070 }; 1071 1072 static const struct attribute_group bnxt_re_dev_attr_group = { 1073 .attrs = bnxt_re_attributes, 1074 }; 1075 1076 static int bnxt_re_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr) 1077 { 1078 struct bnxt_qplib_hwq *mr_hwq; 1079 struct nlattr *table_attr; 1080 struct bnxt_re_mr *mr; 1081 1082 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); 1083 if (!table_attr) 1084 return -EMSGSIZE; 1085 1086 mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); 1087 mr_hwq = &mr->qplib_mr.hwq; 1088 1089 if (rdma_nl_put_driver_u32(msg, "page_size", 1090 mr_hwq->qe_ppg * mr_hwq->element_size)) 1091 goto err; 1092 if (rdma_nl_put_driver_u32(msg, "max_elements", mr_hwq->max_elements)) 1093 goto err; 1094 if (rdma_nl_put_driver_u32(msg, "element_size", mr_hwq->element_size)) 1095 goto err; 1096 if (rdma_nl_put_driver_u64_hex(msg, "hwq", (unsigned long)mr_hwq)) 1097 goto err; 1098 if (rdma_nl_put_driver_u64_hex(msg, "va", mr->qplib_mr.va)) 1099 goto err; 1100 1101 nla_nest_end(msg, table_attr); 1102 return 0; 1103 1104 err: 1105 nla_nest_cancel(msg, table_attr); 1106 return -EMSGSIZE; 1107 } 1108 1109 static int bnxt_re_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr) 1110 { 1111 struct bnxt_re_dev *rdev; 1112 struct bnxt_re_mr *mr; 1113 int err, len; 1114 void *data; 1115 1116 mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); 1117 rdev = mr->rdev; 1118 1119 err = bnxt_re_read_context_allowed(rdev); 1120 if (err) 1121 return err; 1122 1123 len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7 : 1124 BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P5; 1125 data = kzalloc(len, GFP_KERNEL); 1126 if (!data) 1127 return -ENOMEM; 1128 1129 err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_MRW, 1130 mr->qplib_mr.lkey, len, data); 1131 if (!err) 1132 err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data); 1133 1134 kfree(data); 1135 return err; 1136 } 1137 1138 static int bnxt_re_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq) 1139 { 1140 struct bnxt_qplib_hwq *cq_hwq; 1141 struct nlattr *table_attr; 1142 struct bnxt_re_cq *cq; 1143 1144 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 1145 cq_hwq = &cq->qplib_cq.hwq; 1146 1147 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); 1148 if (!table_attr) 1149 return -EMSGSIZE; 1150 1151 if (rdma_nl_put_driver_u32(msg, "cq_depth", cq_hwq->depth)) 1152 goto err; 1153 if (rdma_nl_put_driver_u32(msg, "max_elements", cq_hwq->max_elements)) 1154 goto err; 1155 if (rdma_nl_put_driver_u32(msg, "element_size", cq_hwq->element_size)) 1156 goto err; 1157 if (rdma_nl_put_driver_u32(msg, "max_wqe", cq->qplib_cq.max_wqe)) 1158 goto err; 1159 1160 nla_nest_end(msg, table_attr); 1161 return 0; 1162 1163 err: 1164 nla_nest_cancel(msg, table_attr); 1165 return -EMSGSIZE; 1166 } 1167 1168 static int bnxt_re_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq) 1169 { 1170 struct bnxt_re_dev *rdev; 1171 struct bnxt_re_cq *cq; 1172 int err, len; 1173 void *data; 1174 1175 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 1176 rdev = cq->rdev; 1177 1178 err = bnxt_re_read_context_allowed(rdev); 1179 if (err) 1180 return err; 1181 1182 len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P7 : 1183 BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P5; 1184 data = kzalloc(len, GFP_KERNEL); 1185 if (!data) 1186 return -ENOMEM; 1187 1188 err = bnxt_qplib_read_context(&rdev->rcfw, 1189 CMDQ_READ_CONTEXT_TYPE_CQ, 1190 cq->qplib_cq.id, len, data); 1191 if (!err) 1192 err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data); 1193 1194 kfree(data); 1195 return err; 1196 } 1197 1198 static int bnxt_re_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp) 1199 { 1200 struct bnxt_qplib_qp *qplib_qp; 1201 struct nlattr *table_attr; 1202 struct bnxt_re_qp *qp; 1203 1204 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); 1205 if (!table_attr) 1206 return -EMSGSIZE; 1207 1208 qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 1209 qplib_qp = &qp->qplib_qp; 1210 1211 if (rdma_nl_put_driver_u32(msg, "sq_max_wqe", qplib_qp->sq.max_wqe)) 1212 goto err; 1213 if (rdma_nl_put_driver_u32(msg, "sq_max_sge", qplib_qp->sq.max_sge)) 1214 goto err; 1215 if (rdma_nl_put_driver_u32(msg, "sq_wqe_size", qplib_qp->sq.wqe_size)) 1216 goto err; 1217 if (rdma_nl_put_driver_u32(msg, "sq_swq_start", qplib_qp->sq.swq_start)) 1218 goto err; 1219 if (rdma_nl_put_driver_u32(msg, "sq_swq_last", qplib_qp->sq.swq_last)) 1220 goto err; 1221 if (rdma_nl_put_driver_u32(msg, "rq_max_wqe", qplib_qp->rq.max_wqe)) 1222 goto err; 1223 if (rdma_nl_put_driver_u32(msg, "rq_max_sge", qplib_qp->rq.max_sge)) 1224 goto err; 1225 if (rdma_nl_put_driver_u32(msg, "rq_wqe_size", qplib_qp->rq.wqe_size)) 1226 goto err; 1227 if (rdma_nl_put_driver_u32(msg, "rq_swq_start", qplib_qp->rq.swq_start)) 1228 goto err; 1229 if (rdma_nl_put_driver_u32(msg, "rq_swq_last", qplib_qp->rq.swq_last)) 1230 goto err; 1231 if (rdma_nl_put_driver_u32(msg, "timeout", qplib_qp->timeout)) 1232 goto err; 1233 1234 nla_nest_end(msg, table_attr); 1235 return 0; 1236 1237 err: 1238 nla_nest_cancel(msg, table_attr); 1239 return -EMSGSIZE; 1240 } 1241 1242 static int bnxt_re_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp) 1243 { 1244 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibqp->device, ibdev); 1245 int err, len; 1246 void *data; 1247 1248 err = bnxt_re_read_context_allowed(rdev); 1249 if (err) 1250 return err; 1251 1252 len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P7 : 1253 BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P5; 1254 data = kzalloc(len, GFP_KERNEL); 1255 if (!data) 1256 return -ENOMEM; 1257 1258 err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_QPC, 1259 ibqp->qp_num, len, data); 1260 if (!err) 1261 err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data); 1262 1263 kfree(data); 1264 return err; 1265 } 1266 1267 static int bnxt_re_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq) 1268 { 1269 struct nlattr *table_attr; 1270 struct bnxt_re_srq *srq; 1271 1272 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); 1273 if (!table_attr) 1274 return -EMSGSIZE; 1275 1276 srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); 1277 1278 if (rdma_nl_put_driver_u32_hex(msg, "wqe_size", srq->qplib_srq.wqe_size)) 1279 goto err; 1280 if (rdma_nl_put_driver_u32_hex(msg, "max_wqe", srq->qplib_srq.max_wqe)) 1281 goto err; 1282 if (rdma_nl_put_driver_u32_hex(msg, "max_sge", srq->qplib_srq.max_sge)) 1283 goto err; 1284 if (rdma_nl_put_driver_u32_hex(msg, "srq_limit", srq->qplib_srq.threshold)) 1285 goto err; 1286 1287 nla_nest_end(msg, table_attr); 1288 return 0; 1289 1290 err: 1291 nla_nest_cancel(msg, table_attr); 1292 return -EMSGSIZE; 1293 } 1294 1295 static int bnxt_re_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq) 1296 { 1297 struct bnxt_re_dev *rdev; 1298 struct bnxt_re_srq *srq; 1299 int err, len; 1300 void *data; 1301 1302 srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); 1303 rdev = srq->rdev; 1304 1305 err = bnxt_re_read_context_allowed(rdev); 1306 if (err) 1307 return err; 1308 1309 len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7 : 1310 BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P5; 1311 1312 data = kzalloc(len, GFP_KERNEL); 1313 if (!data) 1314 return -ENOMEM; 1315 1316 err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_SRQ, 1317 srq->qplib_srq.id, len, data); 1318 if (!err) 1319 err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data); 1320 1321 kfree(data); 1322 return err; 1323 } 1324 1325 static const struct ib_device_ops bnxt_re_dev_ops = { 1326 .owner = THIS_MODULE, 1327 .driver_id = RDMA_DRIVER_BNXT_RE, 1328 .uverbs_abi_ver = BNXT_RE_ABI_VERSION, 1329 1330 .add_gid = bnxt_re_add_gid, 1331 .alloc_hw_port_stats = bnxt_re_ib_alloc_hw_port_stats, 1332 .alloc_mr = bnxt_re_alloc_mr, 1333 .alloc_pd = bnxt_re_alloc_pd, 1334 .alloc_ucontext = bnxt_re_alloc_ucontext, 1335 .create_ah = bnxt_re_create_ah, 1336 .create_cq = bnxt_re_create_cq, 1337 .create_qp = bnxt_re_create_qp, 1338 .create_srq = bnxt_re_create_srq, 1339 .create_user_ah = bnxt_re_create_ah, 1340 .dealloc_pd = bnxt_re_dealloc_pd, 1341 .dealloc_ucontext = bnxt_re_dealloc_ucontext, 1342 .del_gid = bnxt_re_del_gid, 1343 .dereg_mr = bnxt_re_dereg_mr, 1344 .destroy_ah = bnxt_re_destroy_ah, 1345 .destroy_cq = bnxt_re_destroy_cq, 1346 .destroy_qp = bnxt_re_destroy_qp, 1347 .destroy_srq = bnxt_re_destroy_srq, 1348 .device_group = &bnxt_re_dev_attr_group, 1349 .disassociate_ucontext = bnxt_re_disassociate_ucontext, 1350 .get_dev_fw_str = bnxt_re_query_fw_str, 1351 .get_dma_mr = bnxt_re_get_dma_mr, 1352 .get_hw_stats = bnxt_re_ib_get_hw_stats, 1353 .get_link_layer = bnxt_re_get_link_layer, 1354 .get_port_immutable = bnxt_re_get_port_immutable, 1355 .map_mr_sg = bnxt_re_map_mr_sg, 1356 .mmap = bnxt_re_mmap, 1357 .mmap_free = bnxt_re_mmap_free, 1358 .modify_qp = bnxt_re_modify_qp, 1359 .modify_srq = bnxt_re_modify_srq, 1360 .poll_cq = bnxt_re_poll_cq, 1361 .post_recv = bnxt_re_post_recv, 1362 .post_send = bnxt_re_post_send, 1363 .post_srq_recv = bnxt_re_post_srq_recv, 1364 .process_mad = bnxt_re_process_mad, 1365 .query_ah = bnxt_re_query_ah, 1366 .query_device = bnxt_re_query_device, 1367 .modify_device = bnxt_re_modify_device, 1368 .query_pkey = bnxt_re_query_pkey, 1369 .query_port = bnxt_re_query_port, 1370 .query_qp = bnxt_re_query_qp, 1371 .query_srq = bnxt_re_query_srq, 1372 .reg_user_mr = bnxt_re_reg_user_mr, 1373 .reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf, 1374 .req_notify_cq = bnxt_re_req_notify_cq, 1375 .resize_cq = bnxt_re_resize_cq, 1376 .create_flow = bnxt_re_create_flow, 1377 .destroy_flow = bnxt_re_destroy_flow, 1378 INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah), 1379 INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq), 1380 INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd), 1381 INIT_RDMA_OBJ_SIZE(ib_qp, bnxt_re_qp, ib_qp), 1382 INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq), 1383 INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx), 1384 }; 1385 1386 static const struct ib_device_ops restrack_ops = { 1387 .fill_res_cq_entry = bnxt_re_fill_res_cq_entry, 1388 .fill_res_cq_entry_raw = bnxt_re_fill_res_cq_entry_raw, 1389 .fill_res_qp_entry = bnxt_re_fill_res_qp_entry, 1390 .fill_res_qp_entry_raw = bnxt_re_fill_res_qp_entry_raw, 1391 .fill_res_mr_entry = bnxt_re_fill_res_mr_entry, 1392 .fill_res_mr_entry_raw = bnxt_re_fill_res_mr_entry_raw, 1393 .fill_res_srq_entry = bnxt_re_fill_res_srq_entry, 1394 .fill_res_srq_entry_raw = bnxt_re_fill_res_srq_entry_raw, 1395 }; 1396 1397 static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) 1398 { 1399 struct ib_device *ibdev = &rdev->ibdev; 1400 int ret; 1401 1402 /* ib device init */ 1403 ibdev->node_type = RDMA_NODE_IB_CA; 1404 strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA"); 1405 ibdev->phys_port_cnt = 1; 1406 1407 addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr); 1408 1409 ibdev->num_comp_vectors = rdev->nqr->num_msix - 1; 1410 ibdev->dev.parent = &rdev->en_dev->pdev->dev; 1411 ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY; 1412 1413 if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)) 1414 ibdev->driver_def = bnxt_re_uapi_defs; 1415 1416 ib_set_device_ops(ibdev, &bnxt_re_dev_ops); 1417 ib_set_device_ops(ibdev, &restrack_ops); 1418 ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1); 1419 if (ret) 1420 return ret; 1421 1422 dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX); 1423 ibdev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ); 1424 return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev); 1425 } 1426 1427 static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev, 1428 struct bnxt_en_dev *en_dev) 1429 { 1430 struct bnxt_re_dev *rdev; 1431 1432 /* Allocate bnxt_re_dev instance here */ 1433 rdev = ib_alloc_device(bnxt_re_dev, ibdev); 1434 if (!rdev) { 1435 ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!", 1436 ROCE_DRV_MODULE_NAME); 1437 return NULL; 1438 } 1439 /* Default values */ 1440 rdev->netdev = en_dev->net; 1441 rdev->en_dev = en_dev; 1442 rdev->adev = adev; 1443 rdev->id = rdev->en_dev->pdev->devfn; 1444 INIT_LIST_HEAD(&rdev->qp_list); 1445 mutex_init(&rdev->qp_lock); 1446 mutex_init(&rdev->pacing.dbq_lock); 1447 atomic_set(&rdev->stats.res.qp_count, 0); 1448 atomic_set(&rdev->stats.res.cq_count, 0); 1449 atomic_set(&rdev->stats.res.srq_count, 0); 1450 atomic_set(&rdev->stats.res.mr_count, 0); 1451 atomic_set(&rdev->stats.res.mw_count, 0); 1452 atomic_set(&rdev->stats.res.ah_count, 0); 1453 atomic_set(&rdev->stats.res.pd_count, 0); 1454 rdev->cosq[0] = 0xFFFF; 1455 rdev->cosq[1] = 0xFFFF; 1456 rdev->cq_coalescing.buf_maxtime = BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME; 1457 if (bnxt_re_chip_gen_p7(en_dev->chip_num)) { 1458 rdev->cq_coalescing.normal_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P7; 1459 rdev->cq_coalescing.during_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P7; 1460 } else { 1461 rdev->cq_coalescing.normal_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P5; 1462 rdev->cq_coalescing.during_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P5; 1463 } 1464 rdev->cq_coalescing.en_ring_idle_mode = BNXT_QPLIB_CQ_COAL_DEF_EN_RING_IDLE_MODE; 1465 1466 return rdev; 1467 } 1468 1469 static int bnxt_re_handle_unaffi_async_event(struct creq_func_event 1470 *unaffi_async) 1471 { 1472 switch (unaffi_async->event) { 1473 case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR: 1474 break; 1475 case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR: 1476 break; 1477 case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR: 1478 break; 1479 case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR: 1480 break; 1481 case CREQ_FUNC_EVENT_EVENT_CQ_ERROR: 1482 break; 1483 case CREQ_FUNC_EVENT_EVENT_TQM_ERROR: 1484 break; 1485 case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR: 1486 break; 1487 case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR: 1488 break; 1489 case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR: 1490 break; 1491 case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR: 1492 break; 1493 case CREQ_FUNC_EVENT_EVENT_TIM_ERROR: 1494 break; 1495 default: 1496 return -EINVAL; 1497 } 1498 return 0; 1499 } 1500 1501 static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, 1502 struct bnxt_re_qp *qp) 1503 { 1504 struct creq_qp_error_notification *err_event; 1505 struct bnxt_re_srq *srq = NULL; 1506 struct ib_event event = {}; 1507 unsigned int flags; 1508 1509 if (qp->qplib_qp.srq) 1510 srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq, 1511 qplib_srq); 1512 1513 if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR && 1514 rdma_is_kernel_res(&qp->ib_qp.res)) { 1515 flags = bnxt_re_lock_cqs(qp); 1516 bnxt_qplib_add_flush_qp(&qp->qplib_qp); 1517 bnxt_re_unlock_cqs(qp, flags); 1518 } 1519 1520 event.device = &qp->rdev->ibdev; 1521 event.element.qp = &qp->ib_qp; 1522 event.event = IB_EVENT_QP_FATAL; 1523 1524 err_event = (struct creq_qp_error_notification *)qp_event; 1525 1526 switch (err_event->req_err_state_reason) { 1527 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR: 1528 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT: 1529 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT: 1530 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2: 1531 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3: 1532 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP: 1533 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND: 1534 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG: 1535 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE: 1536 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR: 1537 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR: 1538 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR: 1539 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR: 1540 event.event = IB_EVENT_QP_ACCESS_ERR; 1541 break; 1542 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1: 1543 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4: 1544 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH: 1545 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR: 1546 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR: 1547 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR: 1548 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR: 1549 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR: 1550 event.event = IB_EVENT_QP_REQ_ERR; 1551 break; 1552 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR: 1553 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR: 1554 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR: 1555 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR: 1556 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR: 1557 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR: 1558 case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR: 1559 event.event = IB_EVENT_QP_FATAL; 1560 break; 1561 1562 default: 1563 break; 1564 } 1565 1566 switch (err_event->res_err_state_reason) { 1567 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX: 1568 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH: 1569 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT: 1570 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY: 1571 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR: 1572 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION: 1573 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR: 1574 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY: 1575 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR: 1576 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION: 1577 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR: 1578 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC: 1579 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND: 1580 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY: 1581 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR: 1582 event.event = IB_EVENT_QP_ACCESS_ERR; 1583 break; 1584 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE: 1585 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR: 1586 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE: 1587 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE: 1588 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR: 1589 event.event = IB_EVENT_QP_REQ_ERR; 1590 break; 1591 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW: 1592 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR: 1593 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR: 1594 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR: 1595 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR: 1596 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR: 1597 event.event = IB_EVENT_QP_FATAL; 1598 break; 1599 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR: 1600 case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR: 1601 if (srq) 1602 event.event = IB_EVENT_SRQ_ERR; 1603 break; 1604 default: 1605 break; 1606 } 1607 1608 if (err_event->res_err_state_reason || err_event->req_err_state_reason) { 1609 ibdev_dbg(&qp->rdev->ibdev, 1610 "%s %s qp_id: %d cons (%d %d) req (%d %d) res (%d %d)\n", 1611 __func__, rdma_is_kernel_res(&qp->ib_qp.res) ? "kernel" : "user", 1612 qp->qplib_qp.id, 1613 err_event->sq_cons_idx, 1614 err_event->rq_cons_idx, 1615 err_event->req_slow_path_state, 1616 err_event->req_err_state_reason, 1617 err_event->res_slow_path_state, 1618 err_event->res_err_state_reason); 1619 } else { 1620 if (srq) 1621 event.event = IB_EVENT_QP_LAST_WQE_REACHED; 1622 } 1623 1624 if (event.event == IB_EVENT_SRQ_ERR && srq->ib_srq.event_handler) { 1625 (*srq->ib_srq.event_handler)(&event, 1626 srq->ib_srq.srq_context); 1627 } else if (event.device && qp->ib_qp.event_handler) { 1628 qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context); 1629 } 1630 1631 return 0; 1632 } 1633 1634 static int bnxt_re_handle_cq_async_error(void *event, struct bnxt_re_cq *cq) 1635 { 1636 struct creq_cq_error_notification *cqerr; 1637 struct ib_event ibevent = {}; 1638 1639 cqerr = event; 1640 switch (cqerr->cq_err_reason) { 1641 case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR: 1642 case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR: 1643 case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR: 1644 case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR: 1645 case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR: 1646 case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR: 1647 ibevent.event = IB_EVENT_CQ_ERR; 1648 break; 1649 default: 1650 break; 1651 } 1652 1653 if (ibevent.event == IB_EVENT_CQ_ERR && cq->ib_cq.event_handler) { 1654 ibevent.element.cq = &cq->ib_cq; 1655 ibevent.device = &cq->rdev->ibdev; 1656 1657 ibdev_dbg(&cq->rdev->ibdev, 1658 "%s err reason %d\n", __func__, cqerr->cq_err_reason); 1659 cq->ib_cq.event_handler(&ibevent, cq->ib_cq.cq_context); 1660 } 1661 1662 return 0; 1663 } 1664 1665 static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async, 1666 void *obj) 1667 { 1668 struct bnxt_qplib_qp *lib_qp; 1669 struct bnxt_qplib_cq *lib_cq; 1670 struct bnxt_re_qp *qp; 1671 struct bnxt_re_cq *cq; 1672 int rc = 0; 1673 u8 event; 1674 1675 if (!obj) 1676 return rc; /* QP was already dead, still return success */ 1677 1678 event = affi_async->event; 1679 switch (event) { 1680 case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: 1681 lib_qp = obj; 1682 qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp); 1683 rc = bnxt_re_handle_qp_async_event(affi_async, qp); 1684 break; 1685 case CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION: 1686 lib_cq = obj; 1687 cq = container_of(lib_cq, struct bnxt_re_cq, qplib_cq); 1688 rc = bnxt_re_handle_cq_async_error(affi_async, cq); 1689 break; 1690 default: 1691 rc = -EINVAL; 1692 } 1693 return rc; 1694 } 1695 1696 static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw, 1697 void *aeqe, void *obj) 1698 { 1699 struct creq_qp_event *affi_async; 1700 struct creq_func_event *unaffi_async; 1701 u8 type; 1702 int rc; 1703 1704 type = ((struct creq_base *)aeqe)->type; 1705 if (type == CREQ_BASE_TYPE_FUNC_EVENT) { 1706 unaffi_async = aeqe; 1707 rc = bnxt_re_handle_unaffi_async_event(unaffi_async); 1708 } else { 1709 affi_async = aeqe; 1710 rc = bnxt_re_handle_affi_async_event(affi_async, obj); 1711 } 1712 1713 return rc; 1714 } 1715 1716 static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, 1717 struct bnxt_qplib_srq *handle, u8 event) 1718 { 1719 struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq, 1720 qplib_srq); 1721 struct ib_event ib_event; 1722 1723 ib_event.device = &srq->rdev->ibdev; 1724 ib_event.element.srq = &srq->ib_srq; 1725 1726 if (srq->ib_srq.event_handler) { 1727 if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT) 1728 ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED; 1729 (*srq->ib_srq.event_handler)(&ib_event, 1730 srq->ib_srq.srq_context); 1731 } 1732 return 0; 1733 } 1734 1735 static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq, 1736 struct bnxt_qplib_cq *handle) 1737 { 1738 struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq, 1739 qplib_cq); 1740 1741 if (cq->ib_cq.comp_handler) 1742 (*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context); 1743 1744 return 0; 1745 } 1746 1747 static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev) 1748 { 1749 int i; 1750 1751 for (i = 1; i < rdev->nqr->num_msix; i++) 1752 bnxt_qplib_disable_nq(&rdev->nqr->nq[i - 1]); 1753 1754 if (rdev->qplib_res.rcfw) 1755 bnxt_qplib_cleanup_res(&rdev->qplib_res); 1756 } 1757 1758 static int bnxt_re_init_res(struct bnxt_re_dev *rdev) 1759 { 1760 int num_vec_enabled = 0; 1761 int rc = 0, i; 1762 u32 db_offt; 1763 1764 bnxt_qplib_init_res(&rdev->qplib_res); 1765 1766 mutex_init(&rdev->nqr->load_lock); 1767 1768 for (i = 1; i < rdev->nqr->num_msix ; i++) { 1769 db_offt = rdev->nqr->msix_entries[i].db_offset; 1770 rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nqr->nq[i - 1], 1771 i - 1, rdev->nqr->msix_entries[i].vector, 1772 db_offt, &bnxt_re_cqn_handler, 1773 &bnxt_re_srqn_handler); 1774 if (rc) { 1775 ibdev_err(&rdev->ibdev, 1776 "Failed to enable NQ with rc = 0x%x", rc); 1777 goto fail; 1778 } 1779 num_vec_enabled++; 1780 } 1781 return 0; 1782 fail: 1783 for (i = num_vec_enabled; i >= 0; i--) 1784 bnxt_qplib_disable_nq(&rdev->nqr->nq[i]); 1785 return rc; 1786 } 1787 1788 static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev) 1789 { 1790 struct bnxt_qplib_nq *nq; 1791 u8 type; 1792 int i; 1793 1794 for (i = 0; i < rdev->nqr->num_msix - 1; i++) { 1795 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); 1796 nq = &rdev->nqr->nq[i]; 1797 bnxt_re_net_ring_free(rdev, nq->ring_id, type); 1798 bnxt_qplib_free_nq(nq); 1799 nq->res = NULL; 1800 } 1801 } 1802 1803 static void bnxt_re_free_res(struct bnxt_re_dev *rdev) 1804 { 1805 bnxt_re_free_nq_res(rdev); 1806 1807 if (rdev->qplib_res.dpi_tbl.max) { 1808 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, 1809 &rdev->dpi_privileged); 1810 } 1811 if (rdev->qplib_res.rcfw) { 1812 bnxt_qplib_free_res(&rdev->qplib_res); 1813 rdev->qplib_res.rcfw = NULL; 1814 } 1815 } 1816 1817 static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) 1818 { 1819 struct bnxt_re_ring_attr rattr = {}; 1820 int num_vec_created = 0; 1821 int rc, i; 1822 u8 type; 1823 1824 /* Configure and allocate resources for qplib */ 1825 rdev->qplib_res.rcfw = &rdev->rcfw; 1826 rc = bnxt_qplib_get_dev_attr(&rdev->rcfw); 1827 if (rc) 1828 goto fail; 1829 1830 rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->netdev); 1831 if (rc) 1832 goto fail; 1833 1834 rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res, 1835 &rdev->dpi_privileged, 1836 rdev, BNXT_QPLIB_DPI_TYPE_KERNEL); 1837 if (rc) 1838 goto dealloc_res; 1839 1840 for (i = 0; i < rdev->nqr->num_msix - 1; i++) { 1841 struct bnxt_qplib_nq *nq; 1842 1843 nq = &rdev->nqr->nq[i]; 1844 nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT; 1845 rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, nq); 1846 if (rc) { 1847 ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x", 1848 i, rc); 1849 goto free_nq; 1850 } 1851 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); 1852 rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr; 1853 rattr.pages = nq->hwq.pbl[rdev->nqr->nq[i].hwq.level].pg_count; 1854 rattr.type = type; 1855 rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX; 1856 rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1; 1857 rattr.lrid = rdev->nqr->msix_entries[i + 1].ring_idx; 1858 rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id); 1859 if (rc) { 1860 ibdev_err(&rdev->ibdev, 1861 "Failed to allocate NQ fw id with rc = 0x%x", 1862 rc); 1863 bnxt_qplib_free_nq(nq); 1864 goto free_nq; 1865 } 1866 num_vec_created++; 1867 } 1868 return 0; 1869 free_nq: 1870 for (i = num_vec_created - 1; i >= 0; i--) { 1871 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); 1872 bnxt_re_net_ring_free(rdev, rdev->nqr->nq[i].ring_id, type); 1873 bnxt_qplib_free_nq(&rdev->nqr->nq[i]); 1874 } 1875 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, 1876 &rdev->dpi_privileged); 1877 dealloc_res: 1878 bnxt_qplib_free_res(&rdev->qplib_res); 1879 1880 fail: 1881 rdev->qplib_res.rcfw = NULL; 1882 return rc; 1883 } 1884 1885 static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp, 1886 u8 port_num, enum ib_event_type event) 1887 { 1888 struct ib_event ib_event; 1889 1890 ib_event.device = ibdev; 1891 if (qp) { 1892 ib_event.element.qp = qp; 1893 ib_event.event = event; 1894 if (qp->event_handler) 1895 qp->event_handler(&ib_event, qp->qp_context); 1896 1897 } else { 1898 ib_event.element.port_num = port_num; 1899 ib_event.event = event; 1900 ib_dispatch_event(&ib_event); 1901 } 1902 } 1903 1904 static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev, 1905 struct bnxt_re_qp *qp) 1906 { 1907 return (qp->ib_qp.qp_type == IB_QPT_GSI) || 1908 (qp == rdev->gsi_ctx.gsi_sqp); 1909 } 1910 1911 static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev) 1912 { 1913 struct bnxt_re_qp *qp; 1914 1915 mutex_lock(&rdev->qp_lock); 1916 list_for_each_entry(qp, &rdev->qp_list, list) { 1917 /* Modify the state of all QPs except QP1/Shadow QP */ 1918 if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) { 1919 if (qp->qplib_qp.state != 1920 CMDQ_MODIFY_QP_NEW_STATE_RESET && 1921 qp->qplib_qp.state != 1922 CMDQ_MODIFY_QP_NEW_STATE_ERR) 1923 bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp, 1924 1, IB_EVENT_QP_FATAL); 1925 } 1926 } 1927 mutex_unlock(&rdev->qp_lock); 1928 } 1929 1930 static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev) 1931 { 1932 if (rdev->is_virtfn) 1933 return; 1934 1935 memset(&rdev->event_bitmap, 0, sizeof(rdev->event_bitmap)); 1936 bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap, 1937 ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE); 1938 } 1939 1940 static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev) 1941 { 1942 if (rdev->is_virtfn) 1943 return; 1944 1945 rdev->event_bitmap |= (1 << ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE); 1946 bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap, 1947 ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE); 1948 } 1949 1950 static void bnxt_re_read_vpd_info(struct bnxt_re_dev *rdev) 1951 { 1952 struct pci_dev *pdev = rdev->en_dev->pdev; 1953 unsigned int vpd_size, kw_len; 1954 int pos, size; 1955 u8 *vpd_data; 1956 1957 vpd_data = pci_vpd_alloc(pdev, &vpd_size); 1958 if (IS_ERR(vpd_data)) { 1959 pci_warn(pdev, "Unable to read VPD, err=%pe\n", vpd_data); 1960 return; 1961 } 1962 1963 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 1964 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); 1965 if (pos < 0) 1966 goto free; 1967 1968 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 1969 memcpy(rdev->board_partno, &vpd_data[pos], size); 1970 free: 1971 kfree(vpd_data); 1972 } 1973 1974 static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev) 1975 { 1976 struct bnxt_en_dev *en_dev = rdev->en_dev; 1977 struct hwrm_ver_get_output resp = {}; 1978 struct hwrm_ver_get_input req = {}; 1979 struct bnxt_qplib_chip_ctx *cctx; 1980 struct bnxt_fw_msg fw_msg = {}; 1981 int rc; 1982 1983 bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VER_GET); 1984 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 1985 req.hwrm_intf_min = HWRM_VERSION_MINOR; 1986 req.hwrm_intf_upd = HWRM_VERSION_UPDATE; 1987 bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, 1988 sizeof(resp), DFLT_HWRM_CMD_TIMEOUT); 1989 rc = bnxt_send_msg(en_dev, &fw_msg); 1990 if (rc) { 1991 ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x", 1992 rc); 1993 return rc; 1994 } 1995 1996 cctx = rdev->chip_ctx; 1997 cctx->hwrm_intf_ver = 1998 (u64)le16_to_cpu(resp.hwrm_intf_major) << 48 | 1999 (u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 | 2000 (u64)le16_to_cpu(resp.hwrm_intf_build) << 16 | 2001 le16_to_cpu(resp.hwrm_intf_patch); 2002 2003 cctx->hwrm_cmd_max_timeout = le16_to_cpu(resp.max_req_timeout); 2004 2005 if (!cctx->hwrm_cmd_max_timeout) 2006 cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT; 2007 2008 return 0; 2009 } 2010 2011 static int bnxt_re_ib_init(struct bnxt_re_dev *rdev) 2012 { 2013 int rc; 2014 u32 event; 2015 2016 /* Register ib dev */ 2017 rc = bnxt_re_register_ib(rdev); 2018 if (rc) { 2019 pr_err("Failed to register with IB: %#x\n", rc); 2020 return rc; 2021 } 2022 dev_info(rdev_to_dev(rdev), "Device registered with IB successfully"); 2023 set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); 2024 2025 event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ? 2026 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 2027 2028 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event); 2029 2030 return rc; 2031 } 2032 2033 static int bnxt_re_alloc_nqr_mem(struct bnxt_re_dev *rdev) 2034 { 2035 rdev->nqr = kzalloc(sizeof(*rdev->nqr), GFP_KERNEL); 2036 if (!rdev->nqr) 2037 return -ENOMEM; 2038 2039 return 0; 2040 } 2041 2042 static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev) 2043 { 2044 kfree(rdev->nqr); 2045 rdev->nqr = NULL; 2046 } 2047 2048 /* When DEL_GID fails, driver is not freeing GID ctx memory. 2049 * To avoid the memory leak, free the memory during unload 2050 */ 2051 static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev) 2052 { 2053 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; 2054 struct bnxt_re_gid_ctx *ctx, **ctx_tbl; 2055 int i; 2056 2057 if (!sgid_tbl->active) 2058 return; 2059 2060 ctx_tbl = sgid_tbl->ctx; 2061 for (i = 0; i < sgid_tbl->max; i++) { 2062 if (sgid_tbl->hw_id[i] == 0xFFFF) 2063 continue; 2064 2065 ctx = ctx_tbl[i]; 2066 kfree(ctx); 2067 } 2068 } 2069 2070 static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev) 2071 { 2072 struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx; 2073 struct bnxt_qplib_res *res = &rdev->qplib_res; 2074 int rc; 2075 2076 rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats); 2077 if (rc) 2078 return rc; 2079 2080 rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats); 2081 if (rc) 2082 goto free_stat_mem; 2083 2084 return 0; 2085 free_stat_mem: 2086 bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats); 2087 2088 return rc; 2089 } 2090 2091 static int bnxt_re_get_stats3_ctx(struct bnxt_re_dev *rdev) 2092 { 2093 struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx; 2094 struct bnxt_qplib_res *res = &rdev->qplib_res; 2095 int rc; 2096 2097 if (!rdev->rcfw.roce_mirror) 2098 return 0; 2099 2100 rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats3); 2101 if (rc) 2102 return rc; 2103 2104 rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats3); 2105 if (rc) 2106 goto free_stat_mem; 2107 2108 return 0; 2109 free_stat_mem: 2110 bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3); 2111 2112 return rc; 2113 } 2114 2115 static void bnxt_re_put_stats3_ctx(struct bnxt_re_dev *rdev) 2116 { 2117 struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx; 2118 struct bnxt_qplib_res *res = &rdev->qplib_res; 2119 2120 if (!rdev->rcfw.roce_mirror) 2121 return; 2122 2123 bnxt_re_net_stats_ctx_free(rdev, hctx->stats3.fw_id); 2124 bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3); 2125 } 2126 2127 static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev) 2128 { 2129 struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx; 2130 struct bnxt_qplib_res *res = &rdev->qplib_res; 2131 2132 bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id); 2133 bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats); 2134 } 2135 2136 static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type) 2137 { 2138 u8 type; 2139 int rc; 2140 2141 bnxt_re_debugfs_rem_pdev(rdev); 2142 2143 bnxt_re_net_unregister_async_event(rdev); 2144 bnxt_re_uninit_dcb_wq(rdev); 2145 2146 bnxt_re_put_stats3_ctx(rdev); 2147 2148 bnxt_re_free_gid_ctx(rdev); 2149 if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, 2150 &rdev->flags)) 2151 bnxt_re_cleanup_res(rdev); 2152 if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags)) 2153 bnxt_re_free_res(rdev); 2154 2155 if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { 2156 rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); 2157 if (rc) 2158 ibdev_warn(&rdev->ibdev, 2159 "Failed to deinitialize RCFW: %#x", rc); 2160 bnxt_re_put_stats_ctx(rdev); 2161 bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx); 2162 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); 2163 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); 2164 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type); 2165 bnxt_qplib_free_rcfw_channel(&rdev->rcfw); 2166 } 2167 2168 rdev->nqr->num_msix = 0; 2169 2170 if (rdev->pacing.dbr_pacing) 2171 bnxt_re_deinitialize_dbr_pacing(rdev); 2172 2173 bnxt_re_free_nqr_mem(rdev); 2174 bnxt_re_destroy_chip_ctx(rdev); 2175 if (op_type == BNXT_RE_COMPLETE_REMOVE) { 2176 if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) 2177 bnxt_unregister_dev(rdev->en_dev); 2178 } 2179 } 2180 2181 static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) 2182 { 2183 struct bnxt_re_ring_attr rattr = {}; 2184 struct bnxt_qplib_creq_ctx *creq; 2185 u32 db_offt; 2186 int vid; 2187 u8 type; 2188 int rc; 2189 2190 if (op_type == BNXT_RE_COMPLETE_INIT) { 2191 /* Registered a new RoCE device instance to netdev */ 2192 rc = bnxt_re_register_netdev(rdev); 2193 if (rc) { 2194 ibdev_err(&rdev->ibdev, 2195 "Failed to register with Ethernet driver, rc %d\n", 2196 rc); 2197 return rc; 2198 } 2199 } 2200 set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); 2201 2202 if (rdev->en_dev->ulp_tbl->msix_requested < BNXT_RE_MIN_MSIX) { 2203 ibdev_err(&rdev->ibdev, 2204 "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n", 2205 rdev->en_dev->ulp_tbl->msix_requested); 2206 bnxt_unregister_dev(rdev->en_dev); 2207 clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); 2208 return -EINVAL; 2209 } 2210 ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n", 2211 rdev->en_dev->ulp_tbl->msix_requested); 2212 2213 rc = bnxt_re_setup_chip_ctx(rdev); 2214 if (rc) { 2215 bnxt_unregister_dev(rdev->en_dev); 2216 clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); 2217 ibdev_err(&rdev->ibdev, "Failed to get chip context\n"); 2218 return -EINVAL; 2219 } 2220 2221 rc = bnxt_re_alloc_nqr_mem(rdev); 2222 if (rc) { 2223 bnxt_re_destroy_chip_ctx(rdev); 2224 bnxt_unregister_dev(rdev->en_dev); 2225 clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); 2226 return rc; 2227 } 2228 rdev->nqr->num_msix = rdev->en_dev->ulp_tbl->msix_requested; 2229 memcpy(rdev->nqr->msix_entries, rdev->en_dev->msix_entries, 2230 sizeof(struct bnxt_msix_entry) * rdev->nqr->num_msix); 2231 2232 /* Check whether VF or PF */ 2233 bnxt_re_get_sriov_func_type(rdev); 2234 2235 /* Establish RCFW Communication Channel to initialize the context 2236 * memory for the function and all child VFs 2237 */ 2238 rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw, 2239 &rdev->qplib_ctx); 2240 if (rc) { 2241 ibdev_err(&rdev->ibdev, 2242 "Failed to allocate RCFW Channel: %#x\n", rc); 2243 goto fail; 2244 } 2245 2246 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); 2247 creq = &rdev->rcfw.creq; 2248 rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr; 2249 rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count; 2250 rattr.type = type; 2251 rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX; 2252 rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1; 2253 rattr.lrid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; 2254 rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id); 2255 if (rc) { 2256 ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc); 2257 goto free_rcfw; 2258 } 2259 db_offt = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].db_offset; 2260 vid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].vector; 2261 rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw, 2262 vid, db_offt, 2263 &bnxt_re_aeq_handler); 2264 if (rc) { 2265 ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n", 2266 rc); 2267 goto free_ring; 2268 } 2269 2270 if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx)) { 2271 rc = bnxt_re_initialize_dbr_pacing(rdev); 2272 if (!rc) { 2273 rdev->pacing.dbr_pacing = true; 2274 } else { 2275 ibdev_err(&rdev->ibdev, 2276 "DBR pacing disabled with error : %d\n", rc); 2277 rdev->pacing.dbr_pacing = false; 2278 } 2279 } 2280 rc = bnxt_qplib_get_dev_attr(&rdev->rcfw); 2281 if (rc) 2282 goto disable_rcfw; 2283 2284 bnxt_qplib_query_version(&rdev->rcfw); 2285 bnxt_re_set_resource_limits(rdev); 2286 2287 if (!rdev->is_virtfn && 2288 !bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) { 2289 rc = bnxt_qplib_alloc_hwctx(&rdev->qplib_res, &rdev->qplib_ctx); 2290 if (rc) { 2291 ibdev_err(&rdev->ibdev, 2292 "Failed to allocate hw context: %#x\n", rc); 2293 goto disable_rcfw; 2294 } 2295 } 2296 2297 rc = bnxt_re_get_stats_ctx(rdev); 2298 if (rc) { 2299 ibdev_err(&rdev->ibdev, 2300 "Failed to allocate stats context: %#x\n", rc); 2301 goto free_ctx; 2302 } 2303 2304 rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx, 2305 rdev->is_virtfn); 2306 if (rc) { 2307 ibdev_err(&rdev->ibdev, 2308 "Failed to initialize RCFW: %#x\n", rc); 2309 goto free_sctx; 2310 } 2311 set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags); 2312 2313 /* Resources based on the 'new' device caps */ 2314 rc = bnxt_re_alloc_res(rdev); 2315 if (rc) { 2316 ibdev_err(&rdev->ibdev, 2317 "Failed to allocate resources: %#x\n", rc); 2318 goto fail; 2319 } 2320 set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags); 2321 rc = bnxt_re_init_res(rdev); 2322 if (rc) { 2323 ibdev_err(&rdev->ibdev, 2324 "Failed to initialize resources: %#x\n", rc); 2325 goto fail; 2326 } 2327 2328 set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags); 2329 2330 if (!rdev->is_virtfn) { 2331 /* Query f/w defaults of CC params */ 2332 rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param); 2333 if (rc) 2334 ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n"); 2335 2336 if (!(rdev->qplib_res.en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT)) 2337 bnxt_re_vf_res_config(rdev); 2338 } 2339 hash_init(rdev->cq_hash); 2340 if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) 2341 hash_init(rdev->srq_hash); 2342 2343 bnxt_re_debugfs_add_pdev(rdev); 2344 2345 bnxt_re_init_dcb_wq(rdev); 2346 bnxt_re_net_register_async_event(rdev); 2347 2348 if (!rdev->is_virtfn) 2349 bnxt_re_read_vpd_info(rdev); 2350 2351 rc = bnxt_re_get_stats3_ctx(rdev); 2352 if (rc) 2353 goto fail; 2354 2355 return 0; 2356 free_sctx: 2357 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); 2358 free_ctx: 2359 bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx); 2360 disable_rcfw: 2361 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); 2362 free_ring: 2363 type = bnxt_qplib_get_ring_type(rdev->chip_ctx); 2364 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type); 2365 free_rcfw: 2366 bnxt_qplib_free_rcfw_channel(&rdev->rcfw); 2367 fail: 2368 bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); 2369 2370 return rc; 2371 } 2372 2373 static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable) 2374 { 2375 struct bnxt_qplib_cc_param cc_param = {}; 2376 2377 /* Do not enable congestion control on VFs */ 2378 if (rdev->is_virtfn) 2379 return; 2380 2381 /* Currently enabling only for GenP5 adapters */ 2382 if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) 2383 return; 2384 2385 if (enable) { 2386 cc_param.enable = 1; 2387 cc_param.tos_ecn = 1; 2388 } 2389 2390 cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC | 2391 CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN); 2392 2393 if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param)) 2394 ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable); 2395 } 2396 2397 static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev, 2398 struct bnxt_re_en_dev_info *en_info, 2399 struct auxiliary_device *adev) 2400 { 2401 /* Before updating the rdev pointer in bnxt_re_en_dev_info structure, 2402 * take the rtnl lock to avoid accessing invalid rdev pointer from 2403 * L2 ULP callbacks. This is applicable in all the places where rdev 2404 * pointer is updated in bnxt_re_en_dev_info. 2405 */ 2406 rtnl_lock(); 2407 en_info->rdev = rdev; 2408 rtnl_unlock(); 2409 } 2410 2411 static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type) 2412 { 2413 struct bnxt_aux_priv *aux_priv = 2414 container_of(adev, struct bnxt_aux_priv, aux_dev); 2415 struct bnxt_re_en_dev_info *en_info; 2416 struct bnxt_en_dev *en_dev; 2417 struct bnxt_re_dev *rdev; 2418 int rc; 2419 2420 en_info = auxiliary_get_drvdata(adev); 2421 en_dev = en_info->en_dev; 2422 2423 2424 rdev = bnxt_re_dev_add(adev, en_dev); 2425 if (!rdev || !rdev_to_dev(rdev)) { 2426 rc = -ENOMEM; 2427 goto exit; 2428 } 2429 2430 bnxt_re_update_en_info_rdev(rdev, en_info, adev); 2431 2432 rc = bnxt_re_dev_init(rdev, op_type); 2433 if (rc) 2434 goto re_dev_dealloc; 2435 2436 rc = bnxt_re_ib_init(rdev); 2437 if (rc) { 2438 pr_err("Failed to register with IB: %s", 2439 aux_priv->aux_dev.name); 2440 goto re_dev_uninit; 2441 } 2442 2443 bnxt_re_setup_cc(rdev, true); 2444 2445 return 0; 2446 2447 re_dev_uninit: 2448 bnxt_re_update_en_info_rdev(NULL, en_info, adev); 2449 bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); 2450 re_dev_dealloc: 2451 ib_dealloc_device(&rdev->ibdev); 2452 exit: 2453 return rc; 2454 } 2455 2456 #define BNXT_ADEV_NAME "bnxt_en" 2457 2458 static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type, 2459 struct auxiliary_device *aux_dev) 2460 { 2461 bnxt_re_setup_cc(rdev, false); 2462 ib_unregister_device(&rdev->ibdev); 2463 bnxt_re_dev_uninit(rdev, op_type); 2464 ib_dealloc_device(&rdev->ibdev); 2465 } 2466 2467 static void bnxt_re_remove(struct auxiliary_device *adev) 2468 { 2469 struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev); 2470 struct bnxt_re_dev *rdev; 2471 2472 mutex_lock(&bnxt_re_mutex); 2473 rdev = en_info->rdev; 2474 2475 if (rdev) 2476 bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev); 2477 kfree(en_info); 2478 mutex_unlock(&bnxt_re_mutex); 2479 } 2480 2481 static int bnxt_re_probe(struct auxiliary_device *adev, 2482 const struct auxiliary_device_id *id) 2483 { 2484 struct bnxt_aux_priv *aux_priv = 2485 container_of(adev, struct bnxt_aux_priv, aux_dev); 2486 struct bnxt_re_en_dev_info *en_info; 2487 struct bnxt_en_dev *en_dev; 2488 int rc; 2489 2490 en_dev = aux_priv->edev; 2491 2492 mutex_lock(&bnxt_re_mutex); 2493 en_info = kzalloc(sizeof(*en_info), GFP_KERNEL); 2494 if (!en_info) { 2495 mutex_unlock(&bnxt_re_mutex); 2496 return -ENOMEM; 2497 } 2498 en_info->en_dev = en_dev; 2499 2500 auxiliary_set_drvdata(adev, en_info); 2501 2502 rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT); 2503 if (rc) 2504 kfree(en_info); 2505 2506 mutex_unlock(&bnxt_re_mutex); 2507 2508 return rc; 2509 } 2510 2511 static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state) 2512 { 2513 struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev); 2514 struct bnxt_en_dev *en_dev; 2515 struct bnxt_re_dev *rdev; 2516 2517 rdev = en_info->rdev; 2518 en_dev = en_info->en_dev; 2519 mutex_lock(&bnxt_re_mutex); 2520 2521 ibdev_info(&rdev->ibdev, "Handle device suspend call"); 2522 /* Check the current device state from bnxt_en_dev and move the 2523 * device to detached state if FW_FATAL_COND is set. 2524 * This prevents more commands to HW during clean-up, 2525 * in case the device is already in error. 2526 */ 2527 if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state)) { 2528 set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags); 2529 set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags); 2530 wake_up_all(&rdev->rcfw.cmdq.waitq); 2531 bnxt_re_dev_stop(rdev); 2532 } 2533 2534 if (rdev->pacing.dbr_pacing) 2535 bnxt_re_set_pacing_dev_state(rdev); 2536 2537 ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx", 2538 __func__, en_dev->en_state); 2539 bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev); 2540 bnxt_re_update_en_info_rdev(NULL, en_info, adev); 2541 mutex_unlock(&bnxt_re_mutex); 2542 2543 return 0; 2544 } 2545 2546 static int bnxt_re_resume(struct auxiliary_device *adev) 2547 { 2548 struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev); 2549 struct bnxt_re_dev *rdev; 2550 2551 mutex_lock(&bnxt_re_mutex); 2552 bnxt_re_add_device(adev, BNXT_RE_POST_RECOVERY_INIT); 2553 rdev = en_info->rdev; 2554 ibdev_info(&rdev->ibdev, "Device resume completed"); 2555 mutex_unlock(&bnxt_re_mutex); 2556 2557 return 0; 2558 } 2559 2560 static void bnxt_re_shutdown(struct auxiliary_device *adev) 2561 { 2562 struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev); 2563 struct bnxt_re_dev *rdev; 2564 2565 rdev = en_info->rdev; 2566 ib_unregister_device(&rdev->ibdev); 2567 bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE); 2568 } 2569 2570 static const struct auxiliary_device_id bnxt_re_id_table[] = { 2571 { .name = BNXT_ADEV_NAME ".rdma", }, 2572 {}, 2573 }; 2574 2575 MODULE_DEVICE_TABLE(auxiliary, bnxt_re_id_table); 2576 2577 static struct auxiliary_driver bnxt_re_driver = { 2578 .name = "rdma", 2579 .probe = bnxt_re_probe, 2580 .remove = bnxt_re_remove, 2581 .shutdown = bnxt_re_shutdown, 2582 .suspend = bnxt_re_suspend, 2583 .resume = bnxt_re_resume, 2584 .id_table = bnxt_re_id_table, 2585 }; 2586 2587 static int __init bnxt_re_mod_init(void) 2588 { 2589 int rc; 2590 2591 pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version); 2592 bnxt_re_register_debugfs(); 2593 2594 rc = auxiliary_driver_register(&bnxt_re_driver); 2595 if (rc) { 2596 pr_err("%s: Failed to register auxiliary driver\n", 2597 ROCE_DRV_MODULE_NAME); 2598 goto err_debug; 2599 } 2600 return 0; 2601 err_debug: 2602 bnxt_re_unregister_debugfs(); 2603 return rc; 2604 } 2605 2606 static void __exit bnxt_re_mod_exit(void) 2607 { 2608 auxiliary_driver_unregister(&bnxt_re_driver); 2609 bnxt_re_unregister_debugfs(); 2610 } 2611 2612 module_init(bnxt_re_mod_init); 2613 module_exit(bnxt_re_mod_exit); 2614