/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qlnxr_os.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qlnxr_def.h"

SYSCTL_NODE(_dev, OID_AUTO, qnxr, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	"Qlogic RDMA module");

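/*
 * Tunables exported under the dev.qnxr sysctl node.  The iWARP knobs below
 * are consumed by qlnxr_init_hw() when the RDMA engine is started;
 * wq_multiplier scales the number of WQEs allocated when a work queue is
 * created.
 */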
uint32_t delayed_ack = 0;
SYSCTL_UINT(_dev_qnxr, OID_AUTO, delayed_ack, CTLFLAG_RW, &delayed_ack, 1,
	"iWARP: Delayed Ack: 0 - Disabled 1 - Enabled. Default: Disabled");

uint32_t timestamp = 1;
SYSCTL_UINT(_dev_qnxr, OID_AUTO, timestamp, CTLFLAG_RW, &timestamp, 1,
	"iWARP: Timestamp: 0 - Disabled 1 - Enabled. Default:Enabled");

uint32_t rcv_wnd_size = 0;
SYSCTL_UINT(_dev_qnxr, OID_AUTO, rcv_wnd_size, CTLFLAG_RW, &rcv_wnd_size, 1,
	"iWARP: Receive Window Size in K. Default 1M");

uint32_t crc_needed = 1;
SYSCTL_UINT(_dev_qnxr, OID_AUTO, crc_needed, CTLFLAG_RW, &crc_needed, 1,
	"iWARP: CRC needed 0 - Disabled 1 - Enabled. Default:Enabled");

uint32_t peer2peer = 1;
SYSCTL_UINT(_dev_qnxr, OID_AUTO, peer2peer, CTLFLAG_RW, &peer2peer, 1,
	"iWARP: Support peer2peer ULPs 0 - Disabled 1 - Enabled. Default:Enabled");

uint32_t mpa_enhanced = 1;
SYSCTL_UINT(_dev_qnxr, OID_AUTO, mpa_enhanced, CTLFLAG_RW, &mpa_enhanced, 1,
	"iWARP: MPA Enhanced mode. Default:1");

uint32_t rtr_type = 7;
SYSCTL_UINT(_dev_qnxr, OID_AUTO, rtr_type, CTLFLAG_RW, &rtr_type, 1,
	"iWARP: RDMAP opcode to use for the RTR message: BITMAP 1: RDMA_SEND 2: RDMA_WRITE 4: RDMA_READ. Default: 7");

#define QNXR_WQ_MULTIPLIER_MIN	(1)
#define QNXR_WQ_MULTIPLIER_MAX	(7)
#define QNXR_WQ_MULTIPLIER_DFT	(3)

uint32_t wq_multiplier = QNXR_WQ_MULTIPLIER_DFT;
SYSCTL_UINT(_dev_qnxr, OID_AUTO, wq_multiplier, CTLFLAG_RW, &wq_multiplier, 1,
	" When creating a WQ the actual number of WQE created will"
	" be multiplied by this number (default is 3).");

static ssize_t
show_rev(struct device *device, struct device_attribute *attr,
	char *buf)
{
	struct qlnxr_dev *dev = dev_get_drvdata(device);

	return sprintf(buf, "0x%x\n", dev->cdev->vendor_id);
}

static ssize_t
show_hca_type(struct device *device,
	struct device_attribute *attr, char *buf)
{
	struct qlnxr_dev *dev = dev_get_drvdata(device);

	return sprintf(buf, "QLogic0x%x\n", dev->cdev->device_id);
}

static ssize_t
show_fw_ver(struct device *device,
	struct device_attribute *attr, char *buf)
{
	struct qlnxr_dev *dev = dev_get_drvdata(device);
	uint32_t fw_ver = (uint32_t)dev->attr.fw_ver;

	return sprintf(buf, "%d.%d.%d\n",
		(fw_ver >> 24) & 0xff, (fw_ver >> 16) & 0xff,
		(fw_ver >> 8) & 0xff);
}

static ssize_t
show_board(struct device *device,
	struct device_attribute *attr, char *buf)
{
	struct qlnxr_dev *dev = dev_get_drvdata(device);

	return sprintf(buf, "%x\n", dev->cdev->device_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *qlnxr_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_fw_ver,
	&dev_attr_board_id
};

static void
qlnxr_ib_dispatch_event(qlnxr_dev_t *dev, uint8_t port_num,
	enum ib_event_type type)
{
	struct ib_event ibev;

	QL_DPRINT12(dev->ha, "enter\n");

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);

	QL_DPRINT12(dev->ha, "exit\n");
}

static int
__qlnxr_iw_destroy_listen(struct iw_cm_id *cm_id)
{
	qlnxr_iw_destroy_listen(cm_id);

	return (0);
}

static int
qlnxr_register_device(qlnxr_dev_t *dev)
{
	struct ib_device *ibdev;
	struct iw_cm_verbs *iwcm = NULL;	/* stays NULL for RoCE devices */
	int ret;

	QL_DPRINT12(dev->ha, "enter\n");

	ibdev = &dev->ibdev;

	strlcpy(ibdev->name, "qlnxr%d", IB_DEVICE_NAME_MAX);

	memset(&ibdev->node_guid, 0, sizeof(ibdev->node_guid));
	memcpy(&ibdev->node_guid, dev->ha->primary_mac, ETHER_ADDR_LEN);

	memcpy(ibdev->node_desc, QLNXR_NODE_DESC, sizeof(QLNXR_NODE_DESC));

	ibdev->owner = THIS_MODULE;
	ibdev->uverbs_abi_ver = 7;
	ibdev->local_dma_lkey = 0;

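	/*
	 * Advertise the user verbs commands supported by this driver.
	 * The SRQ related commands are added further below for RoCE
	 * devices only.
	 */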
	ibdev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV);

	if (QLNX_IS_IWARP(dev)) {
		ibdev->node_type = RDMA_NODE_RNIC;
		ibdev->query_gid = qlnxr_iw_query_gid;
	} else {
		ibdev->node_type = RDMA_NODE_IB_CA;
		ibdev->query_gid = qlnxr_query_gid;
		ibdev->uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
		ibdev->create_srq = qlnxr_create_srq;
		ibdev->destroy_srq = qlnxr_destroy_srq;
		ibdev->modify_srq = qlnxr_modify_srq;
		ibdev->query_srq = qlnxr_query_srq;
		ibdev->post_srq_recv = qlnxr_post_srq_recv;
	}

	ibdev->phys_port_cnt = 1;
	ibdev->num_comp_vectors = dev->num_cnq;

	/* mandatory verbs. */
	ibdev->query_device = qlnxr_query_device;
	ibdev->query_port = qlnxr_query_port;
	ibdev->modify_port = qlnxr_modify_port;

	ibdev->alloc_ucontext = qlnxr_alloc_ucontext;
	ibdev->dealloc_ucontext = qlnxr_dealloc_ucontext;
	/* mandatory to support user space verbs consumer. */
	ibdev->mmap = qlnxr_mmap;

	ibdev->alloc_pd = qlnxr_alloc_pd;
	ibdev->dealloc_pd = qlnxr_dealloc_pd;

	ibdev->create_cq = qlnxr_create_cq;
	ibdev->destroy_cq = qlnxr_destroy_cq;
	ibdev->resize_cq = qlnxr_resize_cq;
	ibdev->req_notify_cq = qlnxr_arm_cq;

	ibdev->create_qp = qlnxr_create_qp;
	ibdev->modify_qp = qlnxr_modify_qp;
	ibdev->query_qp = qlnxr_query_qp;
	ibdev->destroy_qp = qlnxr_destroy_qp;

	ibdev->query_pkey = qlnxr_query_pkey;
	ibdev->create_ah = qlnxr_create_ah;
	ibdev->destroy_ah = qlnxr_destroy_ah;
	ibdev->query_ah = qlnxr_query_ah;
	ibdev->modify_ah = qlnxr_modify_ah;
	ibdev->get_dma_mr = qlnxr_get_dma_mr;
	ibdev->dereg_mr = qlnxr_dereg_mr;
	ibdev->reg_user_mr = qlnxr_reg_user_mr;

#if __FreeBSD_version >= 1102000
	ibdev->alloc_mr = qlnxr_alloc_mr;
	ibdev->map_mr_sg = qlnxr_map_mr_sg;
	ibdev->get_port_immutable = qlnxr_get_port_immutable;
#else
	ibdev->reg_phys_mr = qlnxr_reg_kernel_mr;
	ibdev->alloc_fast_reg_mr = qlnxr_alloc_frmr;
	ibdev->alloc_fast_reg_page_list = qlnxr_alloc_frmr_page_list;
	ibdev->free_fast_reg_page_list = qlnxr_free_frmr_page_list;
#endif /* #if __FreeBSD_version >= 1102000 */

	ibdev->poll_cq = qlnxr_poll_cq;
	ibdev->post_send = qlnxr_post_send;
	ibdev->post_recv = qlnxr_post_recv;
	ibdev->process_mad = qlnxr_process_mad;

	ibdev->dma_device = &dev->pdev.dev;

	ibdev->get_link_layer = qlnxr_link_layer;

	if (QLNX_IS_IWARP(dev)) {
		iwcm = kmalloc(sizeof(*iwcm), GFP_KERNEL);

		device_printf(dev->ha->pci_dev, "device is IWARP\n");
		if (iwcm == NULL)
			return (-ENOMEM);

		ibdev->iwcm = iwcm;

		iwcm->connect = qlnxr_iw_connect;
		iwcm->accept = qlnxr_iw_accept;
		iwcm->reject = qlnxr_iw_reject;

#if (__FreeBSD_version >= 1004000) && (__FreeBSD_version < 1102000)

		iwcm->create_listen_ep = qlnxr_iw_create_listen;
		iwcm->destroy_listen_ep = qlnxr_iw_destroy_listen;
#else
		iwcm->create_listen = qlnxr_iw_create_listen;
		iwcm->destroy_listen = __qlnxr_iw_destroy_listen;
#endif
		iwcm->add_ref = qlnxr_iw_qp_add_ref;
		iwcm->rem_ref = qlnxr_iw_qp_rem_ref;
		iwcm->get_qp = qlnxr_iw_get_qp;
	}

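	/*
	 * Register with ibcore.  On failure release the iWARP connection
	 * manager callbacks; iwcm remains NULL for RoCE devices, so the
	 * kfree() below is a no-op in that case.
	 */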
	ret = ib_register_device(ibdev, NULL);
	if (ret) {
		kfree(iwcm);
	}

	QL_DPRINT12(dev->ha, "exit\n");
	return ret;
}

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))

static void
qlnxr_intr(void *handle)
{
	struct qlnxr_cnq *cnq = handle;
	struct qlnxr_cq *cq;
	struct regpair *cq_handle;
	u16 hw_comp_cons, sw_comp_cons;
	qlnx_host_t *ha;

	ha = cnq->dev->ha;

	QL_DPRINT12(ha, "enter cnq = %p\n", handle);

	ecore_sb_ack(cnq->sb, IGU_INT_DISABLE, 0 /*do not update*/);

	ecore_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = ecore_chain_get_cons_idx(&cnq->pbl);

	rmb();

	QL_DPRINT12(ha, "enter cnq = %p hw_comp_cons = 0x%x sw_comp_cons = 0x%x\n",
		handle, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)ecore_chain_consume(&cnq->pbl);
		cq = (struct qlnxr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
				cq_handle->lo);

		if (cq == NULL) {
			QL_DPRINT11(ha, "cq == NULL\n");
			break;
		}

		if (cq->sig != QLNXR_CQ_MAGIC_NUMBER) {
			QL_DPRINT11(ha,
				"cq->sig = 0x%x QLNXR_CQ_MAGIC_NUMBER = 0x%x\n",
				cq->sig, QLNXR_CQ_MAGIC_NUMBER);
			break;
		}
		cq->arm_flags = 0;

		if (!cq->destroyed && cq->ibcq.comp_handler) {
			QL_DPRINT11(ha, "calling comp_handler = %p "
				"ibcq = %p cq_context = 0x%x\n",
				cq->ibcq.comp_handler,
				&cq->ibcq, cq->ibcq.cq_context);

			(*cq->ibcq.comp_handler)(&cq->ibcq, cq->ibcq.cq_context);
		}
		cq->cnq_notif++;

		sw_comp_cons = ecore_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;
	}

	ecore_rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index, sw_comp_cons);

	ecore_sb_ack(cnq->sb, IGU_INT_ENABLE, 1 /*update*/);

	QL_DPRINT12(ha, "exit cnq = %p\n", handle);
	return;
}

static void
qlnxr_release_irqs(struct qlnxr_dev *dev)
{
	int i;
	qlnx_host_t *ha;

	ha = dev->ha;

	QL_DPRINT12(ha, "enter\n");

	for (i = 0; i < dev->num_cnq; i++) {
		if (dev->cnq_array[i].irq_handle)
			(void)bus_teardown_intr(dev->ha->pci_dev,
				dev->cnq_array[i].irq,
				dev->cnq_array[i].irq_handle);

		if (dev->cnq_array[i].irq)
			(void)bus_release_resource(dev->ha->pci_dev,
				SYS_RES_IRQ,
				dev->cnq_array[i].irq_rid,
				dev->cnq_array[i].irq);
	}
	QL_DPRINT12(ha, "exit\n");
	return;
}

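/*
 * Allocate one interrupt per CNQ, starting at rid dev->sb_start + 2, and
 * attach qlnxr_intr() as the handler for each vector.
 */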
static int
qlnxr_setup_irqs(struct qlnxr_dev *dev)
{
	int start_irq_rid;
	int i;
	qlnx_host_t *ha;

	ha = dev->ha;

	start_irq_rid = dev->sb_start + 2;

	QL_DPRINT12(ha, "enter start_irq_rid = %d num_rss = %d\n",
		start_irq_rid, dev->ha->num_rss);

	for (i = 0; i < dev->num_cnq; i++) {
		dev->cnq_array[i].irq_rid = start_irq_rid + i;

		dev->cnq_array[i].irq = bus_alloc_resource_any(dev->ha->pci_dev,
						SYS_RES_IRQ,
						&dev->cnq_array[i].irq_rid,
						(RF_ACTIVE | RF_SHAREABLE));

		if (dev->cnq_array[i].irq == NULL) {
			QL_DPRINT11(ha,
				"bus_alloc_resource_any failed irq_rid = %d\n",
				dev->cnq_array[i].irq_rid);

			goto qlnxr_setup_irqs_err;
		}

		if (bus_setup_intr(dev->ha->pci_dev,
				dev->cnq_array[i].irq,
				(INTR_TYPE_NET | INTR_MPSAFE),
				NULL, qlnxr_intr, &dev->cnq_array[i],
				&dev->cnq_array[i].irq_handle)) {
			QL_DPRINT11(ha, "bus_setup_intr failed\n");
			goto qlnxr_setup_irqs_err;
		}
		QL_DPRINT12(ha, "irq_rid = %d irq = %p irq_handle = %p\n",
			dev->cnq_array[i].irq_rid, dev->cnq_array[i].irq,
			dev->cnq_array[i].irq_handle);
	}

	QL_DPRINT12(ha, "exit\n");
	return (0);

qlnxr_setup_irqs_err:
	qlnxr_release_irqs(dev);

	QL_DPRINT12(ha, "exit -1\n");
	return (-1);
}

static void
qlnxr_free_resources(struct qlnxr_dev *dev)
{
	int i;
	qlnx_host_t *ha;

	ha = dev->ha;

	QL_DPRINT12(ha, "enter dev->num_cnq = %d\n", dev->num_cnq);

	if (QLNX_IS_IWARP(dev)) {
		if (dev->iwarp_wq != NULL)
			destroy_workqueue(dev->iwarp_wq);
	}

	for (i = 0; i < dev->num_cnq; i++) {
		qlnx_free_mem_sb(dev->ha, &dev->sb_array[i]);
		ecore_chain_free(&dev->ha->cdev, &dev->cnq_array[i].pbl);
	}

	bzero(dev->cnq_array, (sizeof(struct qlnxr_cnq) * QLNXR_MAX_MSIX));
	bzero(dev->sb_array, (sizeof(struct ecore_sb_info) * QLNXR_MAX_MSIX));
	bzero(dev->sgid_tbl, (sizeof(union ib_gid) * QLNXR_MAX_SGID));

	if (mtx_initialized(&dev->idr_lock))
		mtx_destroy(&dev->idr_lock);

	if (mtx_initialized(&dev->sgid_lock))
		mtx_destroy(&dev->sgid_lock);

	QL_DPRINT12(ha, "exit\n");
	return;
}

static int
qlnxr_alloc_resources(struct qlnxr_dev *dev)
{
	uint16_t n_entries;
	int i, rc;
	qlnx_host_t *ha;

	ha = dev->ha;

	QL_DPRINT12(ha, "enter\n");

	bzero(dev->sgid_tbl, (sizeof(union ib_gid) * QLNXR_MAX_SGID));

	mtx_init(&dev->idr_lock, "idr_lock", NULL, MTX_DEF);
	mtx_init(&dev->sgid_lock, "sgid_lock", NULL, MTX_DEF);

	idr_init(&dev->qpidr);

	bzero(dev->sb_array, (sizeof(struct ecore_sb_info) * QLNXR_MAX_MSIX));
	bzero(dev->cnq_array, (sizeof(struct qlnxr_cnq) * QLNXR_MAX_MSIX));

	dev->sb_start = ecore_rdma_get_sb_id(dev->rdma_ctx, 0);

	QL_DPRINT12(ha, "dev->sb_start = 0x%x\n", dev->sb_start);

	/* Allocate CNQ PBLs */

	n_entries = min_t(u32, ECORE_RDMA_MAX_CNQ_SIZE, QLNXR_ROCE_MAX_CNQ_SIZE);

	for (i = 0; i < dev->num_cnq; i++) {
		rc = qlnx_alloc_mem_sb(dev->ha, &dev->sb_array[i],
				dev->sb_start + i);
		if (rc)
			goto qlnxr_alloc_resources_exit;

		rc = ecore_chain_alloc(&dev->ha->cdev,
				ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
				ECORE_CHAIN_MODE_PBL,
				ECORE_CHAIN_CNT_TYPE_U16,
				n_entries,
				sizeof(struct regpair *),
				&dev->cnq_array[i].pbl,
				NULL);
		if (rc)
			goto qlnxr_alloc_resources_exit;

		/* configure cnq, except name since ibdev.name is still NULL */
		dev->cnq_array[i].dev = dev;
		dev->cnq_array[i].sb = &dev->sb_array[i];
		dev->cnq_array[i].hw_cons_ptr =
			&(dev->sb_array[i].sb_virt->pi_array[ECORE_ROCE_PROTOCOL_INDEX]);
		dev->cnq_array[i].index = i;
		sprintf(dev->cnq_array[i].name, "qlnxr%d@pci:%d",
			i, (dev->ha->pci_func));
	}

	QL_DPRINT12(ha, "exit\n");
	return 0;

qlnxr_alloc_resources_exit:

	qlnxr_free_resources(dev);

	QL_DPRINT12(ha, "exit -ENOMEM\n");
	return -ENOMEM;
}

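/*
 * Affiliated asynchronous event callback registered with ecore: translate
 * the firmware event code into an ib_event and deliver it to the event
 * handler of the CQ or QP identified by fw_handle.
 */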
void
qlnxr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
#define EVENT_TYPE_GENERAL	3

	struct qlnxr_dev *dev = (struct qlnxr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roceHandle64 = ((u64)async_handle->hi << 32) + async_handle->lo;
	struct qlnxr_cq *cq = (struct qlnxr_cq *)(uintptr_t)roceHandle64;
	struct qlnxr_qp *qp = (struct qlnxr_qp *)(uintptr_t)roceHandle64;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	qlnx_host_t *ha;

	ha = dev->ha;

	QL_DPRINT12(ha, "enter context = %p e_code = 0x%x fw_handle = %p\n",
		context, e_code, fw_handle);

	if (QLNX_IS_IWARP(dev)) {
		switch (e_code) {
		case ECORE_IWARP_EVENT_CQ_OVERFLOW:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;

		default:
			QL_DPRINT12(ha,
				"unsupported event %d on handle=%llx\n",
				e_code, roceHandle64);
			break;
		}
	} else {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;

		case ROCE_ASYNC_EVENT_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			event_type = EVENT_TYPE_QP;
			break;

		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
			event.event = IB_EVENT_QP_FATAL;
			event_type = EVENT_TYPE_QP;
			break;

		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
			event.event = IB_EVENT_QP_REQ_ERR;
			event_type = EVENT_TYPE_QP;
			break;

		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;

		/* NOTE the following are not implemented in FW
		 *	ROCE_ASYNC_EVENT_CQ_ERR
		 *	ROCE_ASYNC_EVENT_COMM_EST
		 */
		/* TODO associate the following events -
		 *	ROCE_ASYNC_EVENT_SRQ_LIMIT
		 *	ROCE_ASYNC_EVENT_LAST_WQE_REACHED
		 *	ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR (un-affiliated)
		 */
		default:
			QL_DPRINT12(ha,
				"unsupported event 0x%x on fw_handle = %p\n",
				e_code, fw_handle);
			break;
		}
	}

	switch (event_type) {
	case EVENT_TYPE_CQ:
		if (cq && cq->sig == QLNXR_CQ_MAGIC_NUMBER) {
			struct ib_cq *ibcq = &cq->ibcq;

			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			QL_DPRINT11(ha,
				"CQ event with invalid CQ pointer"
				" Handle = %llx\n", roceHandle64);
		}
		QL_DPRINT12(ha,
			"CQ event 0x%x on handle = %p\n", e_code, cq);
		break;

	case EVENT_TYPE_QP:
		if (qp && qp->sig == QLNXR_QP_MAGIC_NUMBER) {
			struct ib_qp *ibqp = &qp->ibqp;

			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			QL_DPRINT11(ha,
				"QP event 0x%x with invalid QP pointer"
				" qp handle = %llx\n",
				e_code, roceHandle64);
		}
		QL_DPRINT12(ha, "QP event 0x%x on qp handle = %p\n",
			e_code, qp);
		break;

	case EVENT_TYPE_GENERAL:
		break;

	default:
		break;
	}

	QL_DPRINT12(ha, "exit\n");

	return;
}

void
qlnxr_unaffiliated_event(void *context, u8 e_code)
{
	struct qlnxr_dev *dev = (struct qlnxr_dev *)context;
	qlnx_host_t *ha;

	ha = dev->ha;

	QL_DPRINT12(ha, "enter/exit \n");
	return;
}

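/*
 * Cache the device limits reported by ecore_rdma_query_device() in
 * dev->attr.
 */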
static int
qlnxr_set_device_attr(struct qlnxr_dev *dev)
{
	struct ecore_rdma_device *ecore_attr;
	struct qlnxr_device_attr *attr;
	u32 page_size;

	ecore_attr = ecore_rdma_query_device(dev->rdma_ctx);

	page_size = ~dev->attr.page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		QL_DPRINT12(dev->ha, "Kernel page size : %ld is smaller than"
			" minimum page size : %ld required by qlnxr\n",
			PAGE_SIZE, page_size);
		return -ENODEV;
	}
	attr = &dev->attr;
	attr->vendor_id = ecore_attr->vendor_id;
	attr->vendor_part_id = ecore_attr->vendor_part_id;

	QL_DPRINT12(dev->ha, "in qlnxr_set_device_attr, vendor : %x device : %x\n",
		attr->vendor_id, attr->vendor_part_id);

	attr->hw_ver = ecore_attr->hw_ver;
	attr->fw_ver = ecore_attr->fw_ver;
	attr->node_guid = ecore_attr->node_guid;
	attr->sys_image_guid = ecore_attr->sys_image_guid;
	attr->max_cnq = ecore_attr->max_cnq;
	attr->max_sge = ecore_attr->max_sge;
	attr->max_inline = ecore_attr->max_inline;
	attr->max_sqe = min_t(u32, ecore_attr->max_wqe, QLNXR_MAX_SQE);
	attr->max_rqe = min_t(u32, ecore_attr->max_wqe, QLNXR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = ecore_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = ecore_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
		ecore_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = ecore_attr->max_cq;
	attr->max_qp = ecore_attr->max_qp;
	attr->max_mr = ecore_attr->max_mr;
	attr->max_mr_size = ecore_attr->max_mr_size;
	attr->max_cqe = min_t(u64, ecore_attr->max_cqe, QLNXR_MAX_CQES);
	attr->max_mw = ecore_attr->max_mw;
	attr->max_fmr = ecore_attr->max_fmr;
	attr->max_mr_mw_fmr_pbl = ecore_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = ecore_attr->max_mr_mw_fmr_size;
	attr->max_pd = ecore_attr->max_pd;
	attr->max_ah = ecore_attr->max_ah;
	attr->max_pkey = ecore_attr->max_pkey;
	attr->max_srq = ecore_attr->max_srq;
	attr->max_srq_wr = ecore_attr->max_srq_wr;
	//attr->dev_caps = ecore_attr->dev_caps;
	attr->page_size_caps = ecore_attr->page_size_caps;
	attr->dev_ack_delay = ecore_attr->dev_ack_delay;
	attr->reserved_lkey = ecore_attr->reserved_lkey;
	attr->bad_pkey_counter = ecore_attr->bad_pkey_counter;
	attr->max_stats_queues = ecore_attr->max_stats_queues;

	return 0;
}

static int
qlnxr_init_hw(struct qlnxr_dev *dev)
{
	struct ecore_rdma_events events;
	struct ecore_rdma_add_user_out_params out_params;
	struct ecore_rdma_cnq_params *cur_pbl;
	struct ecore_rdma_start_in_params *in_params;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;
	qlnx_host_t *ha;

	ha = dev->ha;

	QL_DPRINT12(ha, "enter\n");

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	bzero(&out_params, sizeof(struct ecore_rdma_add_user_out_params));
	bzero(&events, sizeof(struct ecore_rdma_events));

	in_params->desired_cnq = dev->num_cnq;

	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = ecore_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = ecore_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qlnxr_affiliated_event;
	events.unaffiliated_event = qlnxr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->roce.cq_mode = ECORE_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ha->max_frame_size;

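	/*
	 * iWARP specific start parameters; the values come from the
	 * dev.qnxr.* sysctl tunables declared at the top of this file.
	 */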
	if (QLNX_IS_IWARP(dev)) {
		if (delayed_ack)
			in_params->iwarp.flags |= ECORE_IWARP_DA_EN;

		if (timestamp)
			in_params->iwarp.flags |= ECORE_IWARP_TS_EN;

		in_params->iwarp.rcv_wnd_size = rcv_wnd_size * 1024;
		in_params->iwarp.crc_needed = crc_needed;
		in_params->iwarp.ooo_num_rx_bufs =
			(MAX_RXMIT_CONNS * in_params->iwarp.rcv_wnd_size) /
			in_params->max_mtu;

		in_params->iwarp.mpa_peer2peer = peer2peer;
		in_params->iwarp.mpa_rev =
			mpa_enhanced ? ECORE_MPA_REV2 : ECORE_MPA_REV1;
		in_params->iwarp.mpa_rtr = rtr_type;
	}

	memcpy(&in_params->mac_addr[0], dev->ha->primary_mac, ETH_ALEN);

	rc = ecore_rdma_start(dev->rdma_ctx, in_params);
	if (rc)
		goto out;

	rc = ecore_rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	qlnxr_set_device_attr(dev);

	QL_DPRINT12(ha,
		"cdev->doorbells = %p, db_phys_addr = %p db_size = 0x%x\n",
		(void *)ha->cdev.doorbells,
		(void *)ha->cdev.db_phys_addr, ha->cdev.db_size);

	QL_DPRINT12(ha,
		"db_addr = %p db_phys_addr = %p db_size = 0x%x dpi = 0x%x\n",
		(void *)dev->db_addr, (void *)dev->db_phys_addr,
		dev->db_size, dev->dpi);
out:
	kfree(in_params);

	QL_DPRINT12(ha, "exit\n");
	return rc;
}

static void
qlnxr_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
	bool is_vlan, u16 vlan_id)
{
	sgid->global.subnet_prefix = OSAL_CPU_TO_BE64(0xfe80000000000000LL);
	sgid->raw[8] = mac_addr[0] ^ 2;
	sgid->raw[9] = mac_addr[1];
	sgid->raw[10] = mac_addr[2];
	if (is_vlan) {
		sgid->raw[11] = vlan_id >> 8;
		sgid->raw[12] = vlan_id & 0xff;
	} else {
		sgid->raw[11] = 0xff;
		sgid->raw[12] = 0xfe;
	}
	sgid->raw[13] = mac_addr[3];
	sgid->raw[14] = mac_addr[4];
	sgid->raw[15] = mac_addr[5];
}

static bool
qlnxr_add_sgid(struct qlnxr_dev *dev, union ib_gid *new_sgid);

static void
qlnxr_add_ip_based_gid(struct qlnxr_dev *dev, struct ifnet *ifp)
{
	struct ifaddr *ifa;
	union ib_gid gid;

	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
			QL_DPRINT12(dev->ha, "IP address : %x\n",
				((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr);
			ipv6_addr_set_v4mapped(
				((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr,
				(struct in6_addr *)&gid);
			QL_DPRINT12(dev->ha, "gid generated : %llx\n", gid);

			qlnxr_add_sgid(dev, &gid);
		}
	}
	for (int i = 0; i < 16; i++) {
		QL_DPRINT12(dev->ha, "gid generated : %x\n", gid.raw[i]);
	}
}

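/*
 * Store new_sgid in the first free slot of the software GID table.
 * Returns true only when a new entry was written; false if the GID was
 * already present or the table is full.
 */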
static bool
qlnxr_add_sgid(struct qlnxr_dev *dev, union ib_gid *new_sgid)
{
	union ib_gid zero_sgid = { { 0 } };
	int i;
	//unsigned long flags;

	mtx_lock(&dev->sgid_lock);
	for (i = 0; i < QLNXR_MAX_SGID; i++) {
		if (!memcmp(&dev->sgid_tbl[i], &zero_sgid,
			sizeof(union ib_gid))) {
			/* found free entry */
			memcpy(&dev->sgid_tbl[i], new_sgid,
				sizeof(union ib_gid));
			QL_DPRINT12(dev->ha, "copying sgid : %llx\n",
				*new_sgid);
			mtx_unlock(&dev->sgid_lock);
			//TODO ib_dispatch event here?
			return true;
		} else if (!memcmp(&dev->sgid_tbl[i], new_sgid,
			sizeof(union ib_gid))) {
			/* entry already present, no addition required */
			mtx_unlock(&dev->sgid_lock);
			QL_DPRINT12(dev->ha, "sgid present : %llx\n",
				*new_sgid);
			return false;
		}
	}
	if (i == QLNXR_MAX_SGID) {
		QL_DPRINT12(dev->ha, "didn't find an empty entry in sgid_tbl\n");
	}
	mtx_unlock(&dev->sgid_lock);
	return false;
}

static bool
qlnxr_del_sgid(struct qlnxr_dev *dev, union ib_gid *gid)
{
	int found = false;
	int i;
	//unsigned long flags;

	QL_DPRINT12(dev->ha, "removing gid %llx %llx\n",
		gid->global.interface_id,
		gid->global.subnet_prefix);
	mtx_lock(&dev->sgid_lock);
	/* first is the default sgid which cannot be deleted */
	for (i = 1; i < QLNXR_MAX_SGID; i++) {
		if (!memcmp(&dev->sgid_tbl[i], gid, sizeof(union ib_gid))) {
			/* found matching entry */
			memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
			found = true;
			break;
		}
	}
	mtx_unlock(&dev->sgid_lock);

	return found;
}

#if __FreeBSD_version < 1100000

static inline int
is_vlan_dev(struct ifnet *ifp)
{
	return (ifp->if_type == IFT_L2VLAN);
}

static inline uint16_t
vlan_dev_vlan_id(struct ifnet *ifp)
{
	uint16_t vtag;

	if (VLAN_TAG(ifp, &vtag) == 0)
		return (vtag);

	return (0);
}

#endif /* #if __FreeBSD_version < 1100000 */

static void
qlnxr_add_sgids(struct qlnxr_dev *dev)
{
	qlnx_host_t *ha = dev->ha;
	u16 vlan_id;
	bool is_vlan;
	union ib_gid vgid;

	qlnxr_add_ip_based_gid(dev, ha->ifp);
	/* MAC/VLAN base GIDs */
	is_vlan = is_vlan_dev(ha->ifp);
	vlan_id = (is_vlan) ? vlan_dev_vlan_id(ha->ifp) : 0;
	qlnxr_build_sgid_mac(&vgid, ha->primary_mac, is_vlan, vlan_id);
	qlnxr_add_sgid(dev, &vgid);
}

static int
qlnxr_add_default_sgid(struct qlnxr_dev *dev)
{
	/* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
	union ib_gid *sgid = &dev->sgid_tbl[0];
	struct ecore_rdma_device *qattr;
	qlnx_host_t *ha;

	ha = dev->ha;

	qattr = ecore_rdma_query_device(dev->rdma_ctx);
	if (sgid == NULL)
		QL_DPRINT12(ha, "sgid = NULL?\n");

	sgid->global.subnet_prefix = OSAL_CPU_TO_BE64(0xfe80000000000000LL);
	QL_DPRINT12(ha, "node_guid = %llx", dev->attr.node_guid);
	memcpy(&sgid->raw[8], &qattr->node_guid,
		sizeof(qattr->node_guid));
	//memcpy(&sgid->raw[8], &dev->attr.node_guid,
	//	sizeof(dev->attr.node_guid));
	QL_DPRINT12(ha, "DEFAULT sgid=[%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x]\n",
		sgid->raw[0], sgid->raw[1], sgid->raw[2], sgid->raw[3], sgid->raw[4], sgid->raw[5],
		sgid->raw[6], sgid->raw[7], sgid->raw[8], sgid->raw[9], sgid->raw[10], sgid->raw[11],
		sgid->raw[12], sgid->raw[13], sgid->raw[14], sgid->raw[15]);
	return 0;
}

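/*
 * Address/link notifier helpers: on NETDEV_UP the IP based GID (and its
 * VLAN variant, when applicable) is added to the GID table, and on
 * NETDEV_DOWN it is removed.
 */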
static int
qlnxr_addr_event(struct qlnxr_dev *dev,
	unsigned long event,
	struct ifnet *ifp,
	union ib_gid *gid)
{
	bool is_vlan = false;
	union ib_gid vgid;
	u16 vlan_id = 0xffff;

	QL_DPRINT12(dev->ha, "Link event occurred\n");
	is_vlan = is_vlan_dev(dev->ha->ifp);
	vlan_id = (is_vlan) ? vlan_dev_vlan_id(dev->ha->ifp) : 0;

	switch (event) {
	case NETDEV_UP:
		qlnxr_add_sgid(dev, gid);
		if (is_vlan) {
			qlnxr_build_sgid_mac(&vgid, dev->ha->primary_mac, is_vlan, vlan_id);
			qlnxr_add_sgid(dev, &vgid);
		}
		break;
	case NETDEV_DOWN:
		qlnxr_del_sgid(dev, gid);
		if (is_vlan) {
			qlnxr_build_sgid_mac(&vgid, dev->ha->primary_mac, is_vlan, vlan_id);
			qlnxr_del_sgid(dev, &vgid);
		}
		break;
	default:
		break;
	}
	return 1;
}

static int
qlnxr_inetaddr_event(struct notifier_block *notifier,
	unsigned long event, void *ptr)
{
	struct ifaddr *ifa = ptr;
	union ib_gid gid;
	struct qlnxr_dev *dev = container_of(notifier, struct qlnxr_dev, nb_inet);
	qlnx_host_t *ha = dev->ha;

	ipv6_addr_set_v4mapped(
		((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr,
		(struct in6_addr *)&gid);
	return qlnxr_addr_event(dev, event, ha->ifp, &gid);
}

static int
qlnxr_register_inet(struct qlnxr_dev *dev)
{
	int ret;

	dev->nb_inet.notifier_call = qlnxr_inetaddr_event;
	ret = register_inetaddr_notifier(&dev->nb_inet);
	if (ret) {
		QL_DPRINT12(dev->ha, "Failed to register inetaddr\n");
		return ret;
	}
	/* TODO : add for CONFIG_IPV6) */
	return 0;
}

static int
qlnxr_build_sgid_tbl(struct qlnxr_dev *dev)
{
	qlnxr_add_default_sgid(dev);
	qlnxr_add_sgids(dev);
	return 0;
}

static struct qlnx_rdma_if qlnxr_drv;

static void *
qlnxr_add(void *eth_dev)
{
	struct qlnxr_dev *dev;
	int ret;
	//device_t pci_dev;
	qlnx_host_t *ha;

	ha = eth_dev;

	QL_DPRINT12(ha, "enter [ha = %p]\n", ha);

	dev = (struct qlnxr_dev *)ib_alloc_device(sizeof(struct qlnxr_dev));

	if (dev == NULL)
		return (NULL);

	dev->ha = eth_dev;
	dev->cdev = &ha->cdev;
	/* Added to extend Application support */
	linux_pci_attach_device(dev->ha->pci_dev, NULL, NULL, &dev->pdev);

	dev->rdma_ctx = &ha->cdev.hwfns[0];
	dev->wq_multiplier = wq_multiplier;
	dev->num_cnq = QLNX_NUM_CNQ;

	QL_DPRINT12(ha,
		"ha = %p dev = %p ha->cdev = %p\n",
		ha, dev, &ha->cdev);
	QL_DPRINT12(ha,
		"dev->cdev = %p dev->rdma_ctx = %p\n",
		dev->cdev, dev->rdma_ctx);

	ret = qlnxr_alloc_resources(dev);

	if (ret)
		goto qlnxr_add_err;

	ret = qlnxr_setup_irqs(dev);

	if (ret) {
		qlnxr_free_resources(dev);
		goto qlnxr_add_err;
	}

	ret = qlnxr_init_hw(dev);

	if (ret) {
		qlnxr_release_irqs(dev);
		qlnxr_free_resources(dev);
		goto qlnxr_add_err;
	}

	qlnxr_register_device(dev);
	for (int i = 0; i < ARRAY_SIZE(qlnxr_class_attributes); ++i) {
		if (device_create_file(&dev->ibdev.dev, qlnxr_class_attributes[i]))
			goto sysfs_err;
	}
	qlnxr_build_sgid_tbl(dev);
	//ret = qlnxr_register_inet(dev);
	QL_DPRINT12(ha, "exit\n");
	if (!test_and_set_bit(QLNXR_ENET_STATE_BIT, &dev->enet_state)) {
		QL_DPRINT12(ha, "dispatching IB_PORT_ACTIVE event\n");
		qlnxr_ib_dispatch_event(dev, QLNXR_PORT,
			IB_EVENT_PORT_ACTIVE);
	}

	return (dev);

sysfs_err:
	for (int i = 0; i < ARRAY_SIZE(qlnxr_class_attributes); ++i) {
		device_remove_file(&dev->ibdev.dev, qlnxr_class_attributes[i]);
	}
	ib_unregister_device(&dev->ibdev);

qlnxr_add_err:
	ib_dealloc_device(&dev->ibdev);

	QL_DPRINT12(ha, "exit failed\n");
	return (NULL);
}

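/*
 * Remove the sysfs attribute files created by qlnxr_add().
 */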
static void
qlnxr_remove_sysfiles(struct qlnxr_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qlnxr_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev, qlnxr_class_attributes[i]);
}

static int
qlnxr_remove(void *eth_dev, void *qlnx_rdma_dev)
{
	struct qlnxr_dev *dev;
	qlnx_host_t *ha;

	dev = qlnx_rdma_dev;
	ha = eth_dev;

	if ((ha == NULL) || (dev == NULL))
		return (0);

	QL_DPRINT12(ha, "enter ha = %p qlnx_rdma_dev = %p pd_count = %d\n",
		ha, qlnx_rdma_dev, dev->pd_count);

	qlnxr_ib_dispatch_event(dev, QLNXR_PORT,
		IB_EVENT_PORT_ERR);

	if (QLNX_IS_IWARP(dev)) {
		if (dev->pd_count)
			return (EBUSY);
	}

	ib_unregister_device(&dev->ibdev);

	if (QLNX_IS_ROCE(dev)) {
		if (dev->pd_count)
			return (EBUSY);
	}

	ecore_rdma_remove_user(dev->rdma_ctx, dev->dpi);
	ecore_rdma_stop(dev->rdma_ctx);

	qlnxr_release_irqs(dev);

	qlnxr_free_resources(dev);

	qlnxr_remove_sysfiles(dev);
	ib_dealloc_device(&dev->ibdev);

	linux_pci_detach_device(&dev->pdev);

	QL_DPRINT12(ha, "exit ha = %p qlnx_rdma_dev = %p\n", ha, qlnx_rdma_dev);
	return (0);
}

int
qlnx_rdma_ll2_set_mac_filter(void *rdma_ctx, uint8_t *old_mac_address,
	uint8_t *new_mac_address)
{
	struct ecore_hwfn *p_hwfn = rdma_ctx;
	struct qlnx_host *ha;
	int ret = 0;

	ha = (struct qlnx_host *)(p_hwfn->p_dev);
	QL_DPRINT2(ha, "enter rdma_ctx (%p)\n", rdma_ctx);

	if (old_mac_address)
		ecore_llh_remove_mac_filter(p_hwfn->p_dev, 0, old_mac_address);

	if (new_mac_address)
		ret = ecore_llh_add_mac_filter(p_hwfn->p_dev, 0, new_mac_address);

	QL_DPRINT2(ha, "exit rdma_ctx (%p)\n", rdma_ctx);
	return (ret);
}

static void
qlnxr_mac_address_change(struct qlnxr_dev *dev)
{
	qlnx_host_t *ha;

	ha = dev->ha;

	QL_DPRINT12(ha, "enter/exit\n");

	return;
}

static void
qlnxr_notify(void *eth_dev, void *qlnx_rdma_dev, enum qlnx_rdma_event event)
{
	struct qlnxr_dev *dev;
	qlnx_host_t *ha;

	dev = qlnx_rdma_dev;

	if (dev == NULL)
		return;

	ha = dev->ha;

	QL_DPRINT12(ha, "enter (%p, %d)\n", qlnx_rdma_dev, event);

	switch (event) {
	case QLNX_ETHDEV_UP:
		if (!test_and_set_bit(QLNXR_ENET_STATE_BIT, &dev->enet_state))
			qlnxr_ib_dispatch_event(dev, QLNXR_PORT,
				IB_EVENT_PORT_ACTIVE);
		break;

	case QLNX_ETHDEV_CHANGE_ADDR:
		qlnxr_mac_address_change(dev);
		break;

	case QLNX_ETHDEV_DOWN:
		if (test_and_set_bit(QLNXR_ENET_STATE_BIT, &dev->enet_state))
			qlnxr_ib_dispatch_event(dev, QLNXR_PORT,
				IB_EVENT_PORT_ERR);
		break;
	}

	QL_DPRINT12(ha, "exit (%p, %d)\n", qlnx_rdma_dev, event);
	return;
}

static int
qlnxr_mod_load(void)
{
	int ret;

	qlnxr_drv.add = qlnxr_add;
	qlnxr_drv.remove = qlnxr_remove;
	qlnxr_drv.notify = qlnxr_notify;

	ret = qlnx_rdma_register_if(&qlnxr_drv);

	return (0);
}

static int
qlnxr_mod_unload(void)
{
	int ret;

	ret = qlnx_rdma_deregister_if(&qlnxr_drv);
	return (ret);
}

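/*
 * Kernel module event handler: dispatch MOD_LOAD / MOD_UNLOAD to the
 * register / deregister helpers above.
 */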
static int
qlnxr_event_handler(module_t mod, int event, void *arg)
{
	int ret = 0;

	switch (event) {
	case MOD_LOAD:
		ret = qlnxr_mod_load();
		break;

	case MOD_UNLOAD:
		ret = qlnxr_mod_unload();
		break;

	default:
		break;
	}

	return (ret);
}

static moduledata_t qlnxr_mod_info = {
	.name = "qlnxr",
	.evhand = qlnxr_event_handler,
};

MODULE_VERSION(qlnxr, 1);
MODULE_DEPEND(qlnxr, if_qlnxe, 1, 1, 1);
MODULE_DEPEND(qlnxr, ibcore, 1, 1, 1);

#if __FreeBSD_version >= 1100000
MODULE_DEPEND(qlnxr, linuxkpi, 1, 1, 1);
#endif /* #if __FreeBSD_version >= 1100000 */

DECLARE_MODULE(qlnxr, qlnxr_mod_info, SI_SUB_LAST, SI_ORDER_ANY);