/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"

MODULE_DESCRIPTION("QLogic 40G/100G RoCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT	(3)

static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
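/* Unlike the RoCE flavour above, an iWARP port exposes a single
 * MAC-derived GID, no pkey table and no MAD support, so most of the
 * immutable attributes are hard-coded rather than taken from
 * query_port.
 */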
static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}

/* QEDR sysfs interface */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device,
			  rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" :
								"RoCE");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *qedr_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group qedr_attr_group = {
	.attrs = qedr_attributes,
};

static const struct ib_device_ops qedr_iw_dev_ops = {
	.get_port_immutable = qedr_iw_port_immutable,
	.iw_accept = qedr_iw_accept,
	.iw_add_ref = qedr_iw_qp_add_ref,
	.iw_connect = qedr_iw_connect,
	.iw_create_listen = qedr_iw_create_listen,
	.iw_destroy_listen = qedr_iw_destroy_listen,
	.iw_get_qp = qedr_iw_get_qp,
	.iw_reject = qedr_iw_reject,
	.iw_rem_ref = qedr_iw_qp_rem_ref,
	.query_gid = qedr_iw_query_gid,
};

static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;

	ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);

	memcpy(dev->ibdev.iw_ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iw_ifname));

	return 0;
}

static const struct ib_device_ops qedr_roce_dev_ops = {
	.alloc_xrcd = qedr_alloc_xrcd,
	.dealloc_xrcd = qedr_dealloc_xrcd,
	.get_port_immutable = qedr_roce_port_immutable,
	.query_pkey = qedr_query_pkey,
};

static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
}
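/* Ops common to both protocols. The INIT_RDMA_OBJ_SIZE() entries tell
 * the RDMA core how large each driver-private object is and where the
 * generic ib_* struct is embedded in it, so the core can allocate the
 * objects on the driver's behalf.
 */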
static const struct ib_device_ops qedr_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_QEDR,
	.uverbs_abi_ver = QEDR_ABI_VERSION,

	.alloc_mr = qedr_alloc_mr,
	.alloc_pd = qedr_alloc_pd,
	.alloc_ucontext = qedr_alloc_ucontext,
	.create_ah = qedr_create_ah,
	.create_cq = qedr_create_cq,
	.create_qp = qedr_create_qp,
	.create_srq = qedr_create_srq,
	.dealloc_pd = qedr_dealloc_pd,
	.dealloc_ucontext = qedr_dealloc_ucontext,
	.dereg_mr = qedr_dereg_mr,
	.destroy_ah = qedr_destroy_ah,
	.destroy_cq = qedr_destroy_cq,
	.destroy_qp = qedr_destroy_qp,
	.destroy_srq = qedr_destroy_srq,
	.device_group = &qedr_attr_group,
	.get_dev_fw_str = qedr_get_dev_fw_str,
	.get_dma_mr = qedr_get_dma_mr,
	.get_link_layer = qedr_link_layer,
	.map_mr_sg = qedr_map_mr_sg,
	.mmap = qedr_mmap,
	.mmap_free = qedr_mmap_free,
	.modify_qp = qedr_modify_qp,
	.modify_srq = qedr_modify_srq,
	.poll_cq = qedr_poll_cq,
	.post_recv = qedr_post_recv,
	.post_send = qedr_post_send,
	.post_srq_recv = qedr_post_srq_recv,
	.process_mad = qedr_process_mad,
	.query_device = qedr_query_device,
	.query_port = qedr_query_port,
	.query_qp = qedr_query_qp,
	.query_srq = qedr_query_srq,
	.reg_user_mr = qedr_reg_user_mr,
	.req_notify_cq = qedr_arm_cq,
	.resize_cq = qedr_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};

static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.dev.parent = &dev->pdev->dev;

	ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

	rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
	if (rc)
		return rc;

	dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
	return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
					     QED_SB_TYPE_CNQ);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}
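/* Resource setup runs sgid_tbl -> sb_array -> cnq_array -> per-CNQ
 * status block and PBL chain; the error labels below and
 * qedr_free_resources() above unwind in the reverse order.
 */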
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qed_chain_init_params params = {
		.mode = QED_CHAIN_MODE_PBL,
		.intended_use = QED_CHAIN_USE_TO_CONSUME,
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.elem_size = sizeof(struct regpair *),
	};
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	int i, rc;

	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);
	xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);

	if (IS_IWARP(dev)) {
		xa_init(&dev->qps);
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
	}

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
				 QEDR_ROCE_MAX_CNQ_SIZE);

	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
						   &params);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}

static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc) {
		dev->atomic_cap = IB_ATOMIC_NONE;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
	} else {
		dev->atomic_cap = IB_ATOMIC_GLOB;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
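/* Each CNQ element carries a firmware regpair holding the CQ pointer
 * split into two 32-bit words; HILO_U64() reassembles it, e.g.
 * hi = 0x00000001, lo = 0x00c0ffee yields 0x0000000100c0ffee.
 */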
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		cq->arm_flags = 0;

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		/* The CQ's CNQ notification counter is checked before
		 * destroying the CQ in a busy-wait loop that waits for all of
		 * the CQ's CNQ interrupts to be processed. It is increased
		 * here, only after the completion handler, to ensure that
		 * the handler is not running when the CQ is destroyed.
		 */
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	u16 idx;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
			vector = dev->int_info.msix[idx].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}

static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;
	u16 idx;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-X vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
		rc = request_irq(dev->int_info.msix[idx].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
		} else {
			DP_DEBUG(dev, QEDR_MSG_INIT,
				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
				 dev->cnq_array[i].name, i,
				 &dev->cnq_array[i]);
			dev->int_info.used_cnt++;
		}
	}

	return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}
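/* Assuming page_size_caps sets every bit from the minimum supported
 * page size upwards (e.g. 0xFFFFF000 for a 4K minimum), ~caps + 1,
 * i.e. -caps, recovers that minimum: ~0xFFFFF000 + 1 == 0x1000. qedr
 * cannot work if this exceeds the kernel's PAGE_SIZE.
 */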
static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~qed_attr->page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}
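/* An affiliated event identifies the object it hit: for CQ and QP
 * events fw_handle carries the split 64-bit driver pointer (see
 * HILO_U64() above), while for SRQ events only the low 16 bits are
 * meaningful and are used as an index into dev->srqs.
 */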
static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
#define EVENT_TYPE_SRQ		3
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64)async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_srq *ibsrq;
	struct qedr_srq *srq;
	unsigned long flags;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;
	u16 srq_id;

	if (IS_ROCE(dev)) {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		case ROCE_ASYNC_EVENT_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
			event.event = IB_EVENT_QP_FATAL;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
			event.event = IB_EVENT_QP_REQ_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	} else {
		switch (e_code) {
		case QED_IWARP_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case QED_IWARP_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	}
	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	case EVENT_TYPE_SRQ:
		srq_id = (u16)roce_handle64;
		xa_lock_irqsave(&dev->srqs, flags);
		srq = xa_load(&dev->srqs, srq_id);
		if (srq) {
			ibsrq = &srq->ibsrq;
			if (ibsrq->event_handler) {
				event.device = ibsrq->device;
				event.element.srq = ibsrq;
				ibsrq->event_handler(&event,
						     ibsrq->srq_context);
			}
		} else {
			DP_NOTICE(dev,
				  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
				  roce_handle64);
		}
		xa_unlock_irqrestore(&dev->srqs, flags);
		DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
		break;
	default:
		break;
	}
}

static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}
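/* Probe path: query the qed core, allocate CNQ resources, start the
 * RDMA engine, hook up interrupts and only then register with the IB
 * core. Each error label below unwinds exactly the steps that have
 * succeeded so far, in reverse order.
 */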
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0;

	dev = ib_alloc_device(qedr_dev, ibdev);
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
		rc = dev->ops->iwarp_set_engine_affin(cdev, false);
		if (rc) {
			DP_ERR(dev,
			       "iWARP is disabled over a 100g device. Enabling it may impact L2 performance. To enable it, run: devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
			goto init_err;
		}
	}
	dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);

	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
		rc = -ENOMEM;
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	ib_dealloc_device(&dev->ibdev);
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

	return NULL;
}
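/* Teardown mirrors qedr_add(): HW, IRQs and CNQ resources are released
 * in the reverse order of their creation, after the device has been
 * pulled from the IB core.
 */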
static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with the stack to stop all the active traffic
	 * of the registered clients.
	 */
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
		dev->ops->iwarp_set_engine_affin(dev->cdev, true);

	ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update LL2 */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the stack of
 * the event.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	case QEDE_CHANGE_MTU:
		if (rdma_protocol_iwarp(&dev->ibdev, 1))
			if (dev->ndev->mtu != dev->iwarp_max_mtu)
				DP_NOTICE(dev,
					  "MTU was changed from %d to %d. This will not take effect for iWARP until qedr is reloaded\n",
					  dev->iwarp_max_mtu, dev->ndev->mtu);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);