// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/auxiliary_bus.h>

#include <rdma/ib_verbs.h>

#include "bng_res.h"
#include "bng_sp.h"
#include "bng_fw.h"
#include "bnge.h"
#include "bnge_auxr.h"
#include "bng_re.h"
#include "bnge_hwrm.h"

MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@broadcom.com>");
MODULE_DESCRIPTION(BNG_RE_DESC);
MODULE_LICENSE("Dual BSD/GPL");

static struct bng_re_dev *bng_re_dev_add(struct auxiliary_device *adev,
					 struct bnge_auxr_dev *aux_dev)
{
	struct bng_re_dev *rdev;

	/* Allocate bng_re_dev instance */
	rdev = ib_alloc_device(bng_re_dev, ibdev);
	if (!rdev) {
		pr_err("%s: bng_re_dev allocation failure!\n", KBUILD_MODNAME);
		return NULL;
	}

	/* Assign auxiliary device specific data */
	rdev->netdev = aux_dev->net;
	rdev->aux_dev = aux_dev;
	rdev->adev = adev;
	rdev->fn_id = rdev->aux_dev->pdev->devfn;

	return rdev;
}

static int bng_re_register_netdev(struct bng_re_dev *rdev)
{
	struct bnge_auxr_dev *aux_dev;

	aux_dev = rdev->aux_dev;
	return bnge_register_dev(aux_dev, rdev->adev);
}

static void bng_re_destroy_chip_ctx(struct bng_re_dev *rdev)
{
	struct bng_re_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;

	kfree(rdev->dev_attr);
	rdev->dev_attr = NULL;

	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->bng_res.cctx = NULL;
	rdev->bng_res.pdev = NULL;
	kfree(chip_ctx);
}

static int bng_re_setup_chip_ctx(struct bng_re_dev *rdev)
{
	struct bng_re_chip_ctx *chip_ctx;
	struct bnge_auxr_dev *aux_dev;
	int rc = -ENOMEM;

	aux_dev = rdev->aux_dev;
	rdev->bng_res.pdev = aux_dev->pdev;
	rdev->rcfw.res = &rdev->bng_res;
	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = aux_dev->chip_num;
	chip_ctx->hw_stats_size = aux_dev->hw_ring_stats_size;

	rdev->chip_ctx = chip_ctx;
	rdev->bng_res.cctx = rdev->chip_ctx;
	rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
	if (!rdev->dev_attr)
		goto free_chip_ctx;
	rdev->bng_res.dattr = rdev->dev_attr;

	return 0;
free_chip_ctx:
	kfree(rdev->chip_ctx);
	rdev->chip_ctx = NULL;
	return rc;
}

static void bng_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(-1);
	hdr->target_id = cpu_to_le16(-1);
}

static void bng_re_fill_fw_msg(struct bnge_fw_msg *fw_msg, void *msg,
			       int msg_len, void *resp, int resp_max_len,
			       int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}

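/* Request firmware to free the HW ring identified by fw_ring_id. */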
static int bng_re_net_ring_free(struct bng_re_dev *rdev,
				u16 fw_ring_id, int type)
{
	struct bnge_auxr_dev *aux_dev;
	struct hwrm_ring_free_input req = {};
	struct hwrm_ring_free_output resp;
	struct bnge_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!rdev)
		return rc;

	aux_dev = rdev->aux_dev;
	if (!aux_dev)
		return rc;

	bng_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			   sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
	rc = bnge_send_msg(aux_dev, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
			  req.ring_id, rc);
	return rc;
}

static int bng_re_net_ring_alloc(struct bng_re_dev *rdev,
				 struct bng_re_ring_attr *ring_attr,
				 u16 *fw_ring_id)
{
	struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
	struct hwrm_ring_alloc_input req = {};
	struct hwrm_ring_alloc_output resp;
	struct bnge_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!aux_dev)
		return rc;

	bng_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNGE_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			   sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
	rc = bnge_send_msg(aux_dev, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

static void bng_re_query_hwrm_version(struct bng_re_dev *rdev)
{
	struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
	struct hwrm_ver_get_output ver_get_resp = {};
	struct hwrm_ver_get_input ver_get_req = {};
	struct bng_re_chip_ctx *cctx;
	struct bnge_fw_msg fw_msg = {};
	int rc;

	bng_re_init_hwrm_hdr((void *)&ver_get_req, HWRM_VER_GET);
	ver_get_req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	ver_get_req.hwrm_intf_min = HWRM_VERSION_MINOR;
	ver_get_req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bng_re_fill_fw_msg(&fw_msg, (void *)&ver_get_req, sizeof(ver_get_req),
			   (void *)&ver_get_resp, sizeof(ver_get_resp),
			   BNGE_DFLT_HWRM_CMD_TIMEOUT);
	rc = bnge_send_msg(aux_dev, &fw_msg);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
			  rc);
		return;
	}

	cctx = rdev->chip_ctx;
	cctx->hwrm_intf_ver =
		(u64)le16_to_cpu(ver_get_resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(ver_get_resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(ver_get_resp.hwrm_intf_build) << 16 |
		le16_to_cpu(ver_get_resp.hwrm_intf_patch);

	cctx->hwrm_cmd_max_timeout = le16_to_cpu(ver_get_resp.max_req_timeout);

	if (!cctx->hwrm_cmd_max_timeout)
		cctx->hwrm_cmd_max_timeout = BNG_ROCE_FW_MAX_TIMEOUT;
}

static void bng_re_dev_uninit(struct bng_re_dev *rdev)
{
	bng_re_disable_rcfw_channel(&rdev->rcfw);
	bng_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id,
			     RING_ALLOC_REQ_RING_TYPE_NQ);
	bng_re_free_rcfw_channel(&rdev->rcfw);

	kfree(rdev->nqr);
	rdev->nqr = NULL;
	bng_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
		bnge_unregister_dev(rdev->aux_dev);
}

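/* Bring up the RoCE device: register with the bnge driver, validate the
 * reserved MSI-X vectors, set up the chip context, query the HWRM
 * interface version, and create and enable the firmware command (RCFW)
 * channel along with its CREQ completion ring.
 */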
static int bng_re_dev_init(struct bng_re_dev *rdev)
{
	struct bng_re_ring_attr rattr = {};
	struct bng_re_creq_ctx *creq;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

	/* Register a new RoCE device instance with netdev */
	rc = bng_re_register_netdev(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}

	set_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	if (rdev->aux_dev->auxr_info->msix_requested < BNG_RE_MIN_MSIX) {
		ibdev_err(&rdev->ibdev,
			  "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n",
			  rdev->aux_dev->auxr_info->msix_requested);
		bnge_unregister_dev(rdev->aux_dev);
		clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
		return -EINVAL;
	}
	ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
		  rdev->aux_dev->auxr_info->msix_requested);

	rc = bng_re_setup_chip_ctx(rdev);
	if (rc) {
		bnge_unregister_dev(rdev->aux_dev);
		clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	bng_re_query_hwrm_version(rdev);

	rc = bng_re_alloc_fw_channel(&rdev->bng_res, &rdev->rcfw);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	/* Allocate nq record memory */
	rdev->nqr = kzalloc(sizeof(*rdev->nqr), GFP_KERNEL);
	if (!rdev->nqr) {
		bng_re_destroy_chip_ctx(rdev);
		bnge_unregister_dev(rdev->aux_dev);
		clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
		return -ENOMEM;
	}

	rdev->nqr->num_msix = rdev->aux_dev->auxr_info->msix_requested;
	memcpy(rdev->nqr->msix_entries, rdev->aux_dev->msix_info,
	       sizeof(struct bnge_msix_info) * rdev->nqr->num_msix);

	type = RING_ALLOC_REQ_RING_TYPE_NQ;
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[BNG_PBL_LVL_0].pg_map_arr;
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNG_FW_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].ring_idx;
	rc = bng_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].db_offset;
	vid = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].vector;

	rc = bng_re_enable_fw_channel(&rdev->rcfw, vid, db_offt);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	rc = bng_re_get_dev_attr(&rdev->rcfw);
	if (rc)
		goto disable_rcfw;
	return 0;
disable_rcfw:
	bng_re_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	bng_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bng_re_free_rcfw_channel(&rdev->rcfw);
fail:
	bng_re_dev_uninit(rdev);
	return rc;
}

static int bng_re_add_device(struct auxiliary_device *adev)
{
	struct bnge_auxr_priv *auxr_priv =
		container_of(adev, struct bnge_auxr_priv, aux_dev);
	struct bng_re_en_dev_info *dev_info;
	struct bng_re_dev *rdev;
	int rc;

	dev_info = auxiliary_get_drvdata(adev);

	rdev = bng_re_dev_add(adev, auxr_priv->auxr_dev);
	if (!rdev) {
		rc = -ENOMEM;
		goto exit;
	}

	dev_info->rdev = rdev;

	rc = bng_re_dev_init(rdev);
	if (rc)
		goto re_dev_dealloc;

	return 0;

re_dev_dealloc:
	ib_dealloc_device(&rdev->ibdev);
exit:
	return rc;
}

static void bng_re_remove_device(struct bng_re_dev *rdev,
				 struct auxiliary_device *aux_dev)
{
	bng_re_dev_uninit(rdev);
	ib_dealloc_device(&rdev->ibdev);
}

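/* Auxiliary bus probe: allocate the per-device info, stash it as driver
 * data and add the RoCE device on top of the bnge auxiliary device.
 */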
static int bng_re_probe(struct auxiliary_device *adev,
			const struct auxiliary_device_id *id)
{
	struct bnge_auxr_priv *aux_priv =
		container_of(adev, struct bnge_auxr_priv, aux_dev);
	struct bng_re_en_dev_info *en_info;
	int rc;

	en_info = kzalloc(sizeof(*en_info), GFP_KERNEL);
	if (!en_info)
		return -ENOMEM;

	en_info->auxr_dev = aux_priv->auxr_dev;

	auxiliary_set_drvdata(adev, en_info);

	rc = bng_re_add_device(adev);
	if (rc)
		kfree(en_info);

	return rc;
}

static void bng_re_remove(struct auxiliary_device *adev)
{
	struct bng_re_en_dev_info *dev_info = auxiliary_get_drvdata(adev);
	struct bng_re_dev *rdev;

	rdev = dev_info->rdev;

	if (rdev)
		bng_re_remove_device(rdev, adev);
	kfree(dev_info);
}

static const struct auxiliary_device_id bng_re_id_table[] = {
	{ .name = BNG_RE_ADEV_NAME ".rdma", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, bng_re_id_table);

static struct auxiliary_driver bng_re_driver = {
	.name = "rdma",
	.probe = bng_re_probe,
	.remove = bng_re_remove,
	.id_table = bng_re_id_table,
};

static int __init bng_re_mod_init(void)
{
	int rc;

	rc = auxiliary_driver_register(&bng_re_driver);
	if (rc)
		pr_err("%s: Failed to register auxiliary driver\n",
		       KBUILD_MODNAME);

	return rc;
}

static void __exit bng_re_mod_exit(void)
{
	auxiliary_driver_unregister(&bng_re_driver);
}

module_init(bng_re_mod_init);
module_exit(bng_re_mod_exit);