// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/auxiliary_bus.h>

#include <rdma/ib_verbs.h>

#include "bng_res.h"
#include "bng_sp.h"
#include "bng_fw.h"
#include "bnge.h"
#include "bnge_auxr.h"
#include "bng_re.h"
#include "bnge_hwrm.h"
#include "bng_debugfs.h"

MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@broadcom.com>");
MODULE_DESCRIPTION(BNG_RE_DESC);
MODULE_LICENSE("Dual BSD/GPL");

static struct bng_re_dev *bng_re_dev_add(struct auxiliary_device *adev,
					 struct bnge_auxr_dev *aux_dev)
{
	struct bng_re_dev *rdev;

	/* Allocate bng_re_dev instance */
	rdev = ib_alloc_device(bng_re_dev, ibdev);
	if (!rdev) {
		pr_err("%s: bng_re_dev allocation failure!\n", KBUILD_MODNAME);
		return NULL;
	}

	/* Assign auxiliary device specific data */
	rdev->netdev = aux_dev->net;
	rdev->aux_dev = aux_dev;
	rdev->adev = adev;
	rdev->fn_id = rdev->aux_dev->pdev->devfn;

	return rdev;
}

static int bng_re_register_netdev(struct bng_re_dev *rdev)
{
	struct bnge_auxr_dev *aux_dev;

	aux_dev = rdev->aux_dev;
	return bnge_register_dev(aux_dev, rdev->adev);
}

static void bng_re_destroy_chip_ctx(struct bng_re_dev *rdev)
{
	struct bng_re_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;

	kfree(rdev->dev_attr);
	rdev->dev_attr = NULL;

	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->bng_res.cctx = NULL;
	rdev->bng_res.pdev = NULL;
	kfree(chip_ctx);
}

static int bng_re_setup_chip_ctx(struct bng_re_dev *rdev)
{
	struct bng_re_chip_ctx *chip_ctx;
	struct bnge_auxr_dev *aux_dev;
	int rc = -ENOMEM;

	aux_dev = rdev->aux_dev;
	rdev->bng_res.pdev = aux_dev->pdev;
	rdev->rcfw.res = &rdev->bng_res;
	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = aux_dev->chip_num;
	chip_ctx->hw_stats_size = aux_dev->hw_ring_stats_size;

	rdev->chip_ctx = chip_ctx;
	rdev->bng_res.cctx = rdev->chip_ctx;
	rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
	if (!rdev->dev_attr)
		goto free_chip_ctx;
	rdev->bng_res.dattr = rdev->dev_attr;

	return 0;
free_chip_ctx:
	kfree(rdev->chip_ctx);
	rdev->chip_ctx = NULL;
	return rc;
}

static void bng_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(-1);
	hdr->target_id = cpu_to_le16(-1);
}

static void bng_re_fill_fw_msg(struct bnge_fw_msg *fw_msg, void *msg,
			       int msg_len, void *resp, int resp_max_len,
			       int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}

static int bng_re_net_ring_free(struct bng_re_dev *rdev,
				u16 fw_ring_id, int type)
{
	struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
	struct hwrm_ring_free_input req = {};
	struct hwrm_ring_free_output resp;
	struct bnge_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!aux_dev)
		return rc;

	bng_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			   sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
	rc = bnge_send_msg(aux_dev, &fw_msg);
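	/*
	 * A failure here is logged and returned, but the callers in this
	 * file treat ring teardown as best-effort and do not check it.
	 */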
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring %d: %#x",
			  fw_ring_id, rc);
	return rc;
}

static int bng_re_net_ring_alloc(struct bng_re_dev *rdev,
				 struct bng_re_ring_attr *ring_attr,
				 u16 *fw_ring_id)
{
	struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
	struct hwrm_ring_alloc_input req = {};
	struct hwrm_ring_alloc_output resp;
	struct bnge_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!aux_dev)
		return rc;

	bng_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNGE_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			   sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
	rc = bnge_send_msg(aux_dev, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

static int bng_re_stats_ctx_free(struct bng_re_dev *rdev)
{
	struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
	struct hwrm_stat_ctx_free_input req = {};
	struct hwrm_stat_ctx_free_output resp = {};
	struct bnge_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!aux_dev)
		return rc;

	bng_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
	req.stat_ctx_id = cpu_to_le32(rdev->stats_ctx.fw_id);
	bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			   sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
	rc = bnge_send_msg(aux_dev, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
			  rc);

	return rc;
}

static int bng_re_stats_ctx_alloc(struct bng_re_dev *rdev)
{
	struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
	struct bng_re_stats *stats = &rdev->stats_ctx;
	struct hwrm_stat_ctx_alloc_output resp = {};
	struct hwrm_stat_ctx_alloc_input req = {};
	struct bnge_fw_msg fw_msg = {};
	int rc = -EINVAL;

	stats->fw_id = BNGE_INVALID_STATS_CTX_ID;

	if (!aux_dev)
		return rc;

	bng_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(stats->dma_map);
	req.stats_dma_length = cpu_to_le16(rdev->chip_ctx->hw_stats_size);
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			   sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
	rc = bnge_send_msg(aux_dev, &fw_msg);
	if (!rc)
		stats->fw_id = le32_to_cpu(resp.stat_ctx_id);
	return rc;
}

static void bng_re_query_hwrm_version(struct bng_re_dev *rdev)
{
	struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
	struct hwrm_ver_get_output ver_get_resp = {};
	struct hwrm_ver_get_input ver_get_req = {};
	struct bng_re_chip_ctx *cctx;
	struct bnge_fw_msg fw_msg = {};
	int rc;

	bng_re_init_hwrm_hdr((void *)&ver_get_req, HWRM_VER_GET);
	ver_get_req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	ver_get_req.hwrm_intf_min = HWRM_VERSION_MINOR;
	ver_get_req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bng_re_fill_fw_msg(&fw_msg, (void *)&ver_get_req, sizeof(ver_get_req),
			   (void *)&ver_get_resp, sizeof(ver_get_resp),
			   BNGE_DFLT_HWRM_CMD_TIMEOUT);
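	/*
	 * VER_GET is best-effort: a failure is only logged and device
	 * initialization carries on.
	 */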
	rc = bnge_send_msg(aux_dev, &fw_msg);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
			  rc);
		return;
	}

	cctx = rdev->chip_ctx;
	cctx->hwrm_intf_ver =
		(u64)le16_to_cpu(ver_get_resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(ver_get_resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(ver_get_resp.hwrm_intf_build) << 16 |
		le16_to_cpu(ver_get_resp.hwrm_intf_patch);

	cctx->hwrm_cmd_max_timeout = le16_to_cpu(ver_get_resp.max_req_timeout);

	if (!cctx->hwrm_cmd_max_timeout)
		cctx->hwrm_cmd_max_timeout = BNG_ROCE_FW_MAX_TIMEOUT;
}

static void bng_re_dev_uninit(struct bng_re_dev *rdev)
{
	int rc;

	bng_re_debugfs_rem_pdev(rdev);

	if (test_and_clear_bit(BNG_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bng_re_deinit_rcfw(&rdev->rcfw);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to deinitialize RCFW: %#x", rc);
		bng_re_stats_ctx_free(rdev);
		bng_re_free_stats_ctx_mem(rdev->bng_res.pdev, &rdev->stats_ctx);
		bng_re_disable_rcfw_channel(&rdev->rcfw);
		bng_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id,
				     RING_ALLOC_REQ_RING_TYPE_NQ);
		bng_re_free_rcfw_channel(&rdev->rcfw);
	}

	kfree(rdev->nqr);
	rdev->nqr = NULL;
	bng_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
		bnge_unregister_dev(rdev->aux_dev);
}

static int bng_re_dev_init(struct bng_re_dev *rdev)
{
	struct bng_re_ring_attr rattr = {};
	struct bng_re_creq_ctx *creq;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

	/* Register the new RoCE device instance with the netdev driver */
	rc = bng_re_register_netdev(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}

	set_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	if (rdev->aux_dev->auxr_info->msix_requested < BNG_RE_MIN_MSIX) {
		ibdev_err(&rdev->ibdev,
			  "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n",
			  rdev->aux_dev->auxr_info->msix_requested);
		bnge_unregister_dev(rdev->aux_dev);
		clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
		return -EINVAL;
	}
	ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
		  rdev->aux_dev->auxr_info->msix_requested);

	rc = bng_re_setup_chip_ctx(rdev);
	if (rc) {
		bnge_unregister_dev(rdev->aux_dev);
		clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	bng_re_query_hwrm_version(rdev);

	rc = bng_re_alloc_fw_channel(&rdev->bng_res, &rdev->rcfw);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	/* Allocate NQ record memory */
	rdev->nqr = kzalloc(sizeof(*rdev->nqr), GFP_KERNEL);
	if (!rdev->nqr) {
		rc = -ENOMEM;
		goto free_rcfw;
	}

	rdev->nqr->num_msix = rdev->aux_dev->auxr_info->msix_requested;
	memcpy(rdev->nqr->msix_entries, rdev->aux_dev->msix_info,
	       sizeof(struct bnge_msix_info) * rdev->nqr->num_msix);

	type = RING_ALLOC_REQ_RING_TYPE_NQ;
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[BNG_PBL_LVL_0].pg_map_arr;
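	/*
	 * The base address above points at the level-0 page table, while the
	 * page count below comes from the deepest populated PBL level.
	 */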
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNG_FW_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].ring_idx;
	rc = bng_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].db_offset;
	vid = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].vector;

	rc = bng_re_enable_fw_channel(&rdev->rcfw, vid, db_offt);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	rc = bng_re_get_dev_attr(&rdev->rcfw);
	if (rc)
		goto disable_rcfw;

	bng_re_debugfs_add_pdev(rdev);

	rc = bng_re_alloc_stats_ctx_mem(rdev->bng_res.pdev, rdev->chip_ctx,
					&rdev->stats_ctx);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate stats context memory: %#x\n", rc);
		goto disable_rcfw;
	}

	rc = bng_re_stats_ctx_alloc(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW stats context: %#x\n", rc);
		goto free_stats_ctx;
	}

	rc = bng_re_init_rcfw(&rdev->rcfw, &rdev->stats_ctx);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNG_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	return 0;
free_sctx:
	bng_re_stats_ctx_free(rdev);
free_stats_ctx:
	bng_re_free_stats_ctx_mem(rdev->bng_res.pdev, &rdev->stats_ctx);
disable_rcfw:
	bng_re_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	bng_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bng_re_free_rcfw_channel(&rdev->rcfw);
fail:
	bng_re_dev_uninit(rdev);
	return rc;
}

static int bng_re_add_device(struct auxiliary_device *adev)
{
	struct bnge_auxr_priv *auxr_priv =
		container_of(adev, struct bnge_auxr_priv, aux_dev);
	struct bng_re_en_dev_info *dev_info;
	struct bng_re_dev *rdev;
	int rc;

	dev_info = auxiliary_get_drvdata(adev);

	rdev = bng_re_dev_add(adev, auxr_priv->auxr_dev);
	if (!rdev) {
		rc = -ENOMEM;
		goto exit;
	}

	dev_info->rdev = rdev;

	rc = bng_re_dev_init(rdev);
	if (rc)
		goto re_dev_dealloc;

	return 0;

re_dev_dealloc:
	ib_dealloc_device(&rdev->ibdev);
exit:
	return rc;
}

static void bng_re_remove_device(struct bng_re_dev *rdev,
				 struct auxiliary_device *aux_dev)
{
	bng_re_dev_uninit(rdev);
	ib_dealloc_device(&rdev->ibdev);
}

static int bng_re_probe(struct auxiliary_device *adev,
			const struct auxiliary_device_id *id)
{
	struct bnge_auxr_priv *aux_priv =
		container_of(adev, struct bnge_auxr_priv, aux_dev);
	struct bng_re_en_dev_info *en_info;
	int rc;

	en_info = kzalloc(sizeof(*en_info), GFP_KERNEL);
	if (!en_info)
		return -ENOMEM;

	en_info->auxr_dev = aux_priv->auxr_dev;

	auxiliary_set_drvdata(adev, en_info);

	rc = bng_re_add_device(adev);
	if (rc)
		kfree(en_info);

	return rc;
}

static void bng_re_remove(struct auxiliary_device *adev)
{
	struct bng_re_en_dev_info *dev_info = auxiliary_get_drvdata(adev);
	struct bng_re_dev *rdev;

	rdev = dev_info->rdev;

	if (rdev)
		bng_re_remove_device(rdev, adev);
	kfree(dev_info);
}

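/* Auxiliary device IDs this RoCE driver binds against. */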
static const struct auxiliary_device_id bng_re_id_table[] = {
	{ .name = BNG_RE_ADEV_NAME ".rdma", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, bng_re_id_table);

static struct auxiliary_driver bng_re_driver = {
	.name = "rdma",
	.probe = bng_re_probe,
	.remove = bng_re_remove,
	.id_table = bng_re_id_table,
};

static int __init bng_re_mod_init(void)
{
	int rc;

	bng_re_register_debugfs();

	rc = auxiliary_driver_register(&bng_re_driver);
	if (rc) {
		pr_err("%s: Failed to register auxiliary driver\n",
		       KBUILD_MODNAME);
		goto unreg_debugfs;
	}
	return 0;
unreg_debugfs:
	bng_re_unregister_debugfs();
	return rc;
}

static void __exit bng_re_mod_exit(void)
{
	auxiliary_driver_unregister(&bng_re_driver);
	bng_re_unregister_debugfs();
}

module_init(bng_re_mod_init);
module_exit(bng_re_mod_exit);