/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME	"vmw_pvrdma"
#define DRV_VERSION	"1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);

static ssize_t hca_type_show(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t hw_rev_show(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t board_id_show(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *pvrdma_class_attributes[] = {
        &dev_attr_hw_rev.attr,
        &dev_attr_hca_type.attr,
        &dev_attr_board_id.attr,
        NULL,
};

static const struct attribute_group pvrdma_attr_group = {
        .attrs = pvrdma_class_attributes,
};

static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
        struct pvrdma_dev *dev =
                container_of(device, struct pvrdma_dev, ib_dev);
        snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
                 (int) (dev->dsr->caps.fw_ver >> 32),
                 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
                 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
        /* Initialize some device related stuff */
        spin_lock_init(&dev->cmd_lock);
        sema_init(&dev->cmd_sema, 1);
        atomic_set(&dev->num_qps, 0);
        atomic_set(&dev->num_srqs, 0);
        atomic_set(&dev->num_cqs, 0);
        atomic_set(&dev->num_pds, 0);
        atomic_set(&dev->num_ahs, 0);

        return 0;
}

static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
                                 struct ib_port_immutable *immutable)
{
        struct pvrdma_dev *dev = to_vdev(ibdev);
        struct ib_port_attr attr;
        int err;

        if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
                immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
        else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
                immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
        return 0;
}

static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
                                            u8 port_num)
{
        struct net_device *netdev;
        struct pvrdma_dev *dev = to_vdev(ibdev);

        if (port_num != 1)
                return NULL;

        rcu_read_lock();
        netdev = dev->netdev;
        if (netdev)
                dev_hold(netdev);
        rcu_read_unlock();

        return netdev;
}

static const struct ib_device_ops pvrdma_dev_ops = {
        .add_gid = pvrdma_add_gid,
        .alloc_mr = pvrdma_alloc_mr,
        .alloc_pd = pvrdma_alloc_pd,
        .alloc_ucontext = pvrdma_alloc_ucontext,
        .create_ah = pvrdma_create_ah,
        .create_cq = pvrdma_create_cq,
        .create_qp = pvrdma_create_qp,
        .dealloc_pd = pvrdma_dealloc_pd,
        .dealloc_ucontext = pvrdma_dealloc_ucontext,
        .del_gid = pvrdma_del_gid,
        .dereg_mr = pvrdma_dereg_mr,
        .destroy_ah = pvrdma_destroy_ah,
        .destroy_cq = pvrdma_destroy_cq,
        .destroy_qp = pvrdma_destroy_qp,
        .get_dev_fw_str = pvrdma_get_fw_ver_str,
        .get_dma_mr = pvrdma_get_dma_mr,
        .get_link_layer = pvrdma_port_link_layer,
        .get_netdev = pvrdma_get_netdev,
        .get_port_immutable = pvrdma_port_immutable,
        .map_mr_sg = pvrdma_map_mr_sg,
        .mmap = pvrdma_mmap,
        .modify_port = pvrdma_modify_port,
        .modify_qp = pvrdma_modify_qp,
        .poll_cq = pvrdma_poll_cq,
        .post_recv = pvrdma_post_recv,
        .post_send = pvrdma_post_send,
        .query_device = pvrdma_query_device,
        .query_gid = pvrdma_query_gid,
        .query_pkey = pvrdma_query_pkey,
        .query_port = pvrdma_query_port,
        .query_qp = pvrdma_query_qp,
        .reg_user_mr = pvrdma_reg_user_mr,
        .req_notify_cq = pvrdma_req_notify_cq,

        INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
};

static const struct ib_device_ops pvrdma_dev_srq_ops = {
        .create_srq = pvrdma_create_srq,
        .destroy_srq = pvrdma_destroy_srq,
        .modify_srq = pvrdma_modify_srq,
        .query_srq = pvrdma_query_srq,
};

static int pvrdma_register_device(struct pvrdma_dev *dev)
{
        int ret = -1;

        dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
        dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
        dev->flags = 0;
        dev->ib_dev.owner = THIS_MODULE;
        dev->ib_dev.num_comp_vectors = 1;
        dev->ib_dev.dev.parent = &dev->pdev->dev;
        dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
                (1ull << IB_USER_VERBS_CMD_REG_MR) |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
                (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
                (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
                (1ull << IB_USER_VERBS_CMD_POST_SEND) |
                (1ull << IB_USER_VERBS_CMD_POST_RECV) |
                (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_AH);

        dev->ib_dev.node_type = RDMA_NODE_IB_CA;
        dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

        ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_ops);

        mutex_init(&dev->port_mutex);
        spin_lock_init(&dev->desc_lock);

        dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
                              GFP_KERNEL);
        if (!dev->cq_tbl)
                return ret;
        spin_lock_init(&dev->cq_tbl_lock);

        dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
                              GFP_KERNEL);
        if (!dev->qp_tbl)
                goto err_cq_free;
        spin_lock_init(&dev->qp_tbl_lock);

        /* Check if SRQ is supported by backend */
        if (dev->dsr->caps.max_srq) {
                dev->ib_dev.uverbs_cmd_mask |=
                        (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
                        (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
                        (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
                        (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);

                ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_srq_ops);

                dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
                                       sizeof(struct pvrdma_srq *),
                                       GFP_KERNEL);
                if (!dev->srq_tbl)
                        goto err_qp_free;
        }
        dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
        spin_lock_init(&dev->srq_tbl_lock);
        rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);

        ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d");
        if (ret)
                goto err_srq_free;

        dev->ib_active = true;

        return 0;

err_srq_free:
        kfree(dev->srq_tbl);
err_qp_free:
        kfree(dev->qp_tbl);
err_cq_free:
        kfree(dev->cq_tbl);

        return ret;
}

static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
        u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
        struct pvrdma_dev *dev = dev_id;

        dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

        if (!dev->pdev->msix_enabled) {
                /* Legacy intr */
                icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
                if (icr == 0)
                        return IRQ_NONE;
        }

        if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
                complete(&dev->cmd_done);

        return IRQ_HANDLED;
}

static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
        struct pvrdma_qp *qp;
        unsigned long flags;

        spin_lock_irqsave(&dev->qp_tbl_lock, flags);
        qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
        if (qp)
                refcount_inc(&qp->refcnt);
        spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

        if (qp && qp->ibqp.event_handler) {
                struct ib_qp *ibqp = &qp->ibqp;
                struct ib_event e;

                e.device = ibqp->device;
                e.element.qp = ibqp;
                e.event = type; /* 1:1 mapping for now. */
                ibqp->event_handler(&e, ibqp->qp_context);
        }
        if (qp) {
                if (refcount_dec_and_test(&qp->refcnt))
                        complete(&qp->free);
        }
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
        struct pvrdma_cq *cq;
        unsigned long flags;

        spin_lock_irqsave(&dev->cq_tbl_lock, flags);
        cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
        if (cq)
                refcount_inc(&cq->refcnt);
        spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

        if (cq && cq->ibcq.event_handler) {
                struct ib_cq *ibcq = &cq->ibcq;
                struct ib_event e;

                e.device = ibcq->device;
                e.element.cq = ibcq;
                e.event = type; /* 1:1 mapping for now. */
                ibcq->event_handler(&e, ibcq->cq_context);
        }
        if (cq) {
                if (refcount_dec_and_test(&cq->refcnt))
                        complete(&cq->free);
        }
}

static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
{
        struct pvrdma_srq *srq;
        unsigned long flags;

        spin_lock_irqsave(&dev->srq_tbl_lock, flags);
        if (dev->srq_tbl)
                srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
        else
                srq = NULL;
        if (srq)
                refcount_inc(&srq->refcnt);
        spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

        if (srq && srq->ibsrq.event_handler) {
                struct ib_srq *ibsrq = &srq->ibsrq;
                struct ib_event e;

                e.device = ibsrq->device;
                e.element.srq = ibsrq;
                e.event = type; /* 1:1 mapping for now. */
                ibsrq->event_handler(&e, ibsrq->srq_context);
        }
        if (srq) {
                if (refcount_dec_and_test(&srq->refcnt))
                        complete(&srq->free);
        }
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
                                  enum ib_event_type event)
{
        struct ib_event ib_event;

        memset(&ib_event, 0, sizeof(ib_event));
        ib_event.device = &dev->ib_dev;
        ib_event.element.port_num = port;
        ib_event.event = event;
        ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
        if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
                dev_warn(&dev->pdev->dev, "event on port %d\n", port);
                return;
        }

        pvrdma_dispatch_event(dev, port, type);
}

static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
        return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
                                        &dev->async_pdir,
                                        PAGE_SIZE +
                                        sizeof(struct pvrdma_eqe) * i);
}

static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
        struct pvrdma_dev *dev = dev_id;
        struct pvrdma_ring *ring = &dev->async_ring_state->rx;
        int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
                         PAGE_SIZE / sizeof(struct pvrdma_eqe);
        unsigned int head;

        dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

        /*
         * Don't process events until the IB device is registered. Otherwise
         * we'll try to ib_dispatch_event() on an invalid device.
         */
        if (!dev->ib_active)
                return IRQ_HANDLED;

        while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
                struct pvrdma_eqe *eqe;

                eqe = get_eqe(dev, head);

                switch (eqe->type) {
                case PVRDMA_EVENT_QP_FATAL:
                case PVRDMA_EVENT_QP_REQ_ERR:
                case PVRDMA_EVENT_QP_ACCESS_ERR:
                case PVRDMA_EVENT_COMM_EST:
                case PVRDMA_EVENT_SQ_DRAINED:
                case PVRDMA_EVENT_PATH_MIG:
                case PVRDMA_EVENT_PATH_MIG_ERR:
                case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
                        pvrdma_qp_event(dev, eqe->info, eqe->type);
                        break;

                case PVRDMA_EVENT_CQ_ERR:
                        pvrdma_cq_event(dev, eqe->info, eqe->type);
                        break;

                case PVRDMA_EVENT_SRQ_ERR:
                case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
                        pvrdma_srq_event(dev, eqe->info, eqe->type);
                        break;

                case PVRDMA_EVENT_PORT_ACTIVE:
                case PVRDMA_EVENT_PORT_ERR:
                case PVRDMA_EVENT_LID_CHANGE:
                case PVRDMA_EVENT_PKEY_CHANGE:
                case PVRDMA_EVENT_SM_CHANGE:
                case PVRDMA_EVENT_CLIENT_REREGISTER:
                case PVRDMA_EVENT_GID_CHANGE:
                        pvrdma_dev_event(dev, eqe->info, eqe->type);
                        break;

                case PVRDMA_EVENT_DEVICE_FATAL:
                        pvrdma_dev_event(dev, 1, eqe->type);
                        break;

                default:
                        break;
                }

                pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
        }

        return IRQ_HANDLED;
}

static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
                                           unsigned int i)
{
        return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
                                        &dev->cq_pdir,
                                        PAGE_SIZE +
                                        sizeof(struct pvrdma_cqne) * i);
}

static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
        struct pvrdma_dev *dev = dev_id;
        struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
        int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
                         sizeof(struct pvrdma_cqne);
        unsigned int head;
        unsigned long flags;

        dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

        while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
                struct pvrdma_cqne *cqne;
                struct pvrdma_cq *cq;

                cqne = get_cqne(dev, head);
                spin_lock_irqsave(&dev->cq_tbl_lock, flags);
                cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
                if (cq)
                        refcount_inc(&cq->refcnt);
                spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

                if (cq && cq->ibcq.comp_handler)
                        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
                if (cq) {
                        if (refcount_dec_and_test(&cq->refcnt))
                                complete(&cq->free);
                }
                pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
        }

        return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
        int i;

        dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
        for (i = 0; i < dev->nr_vectors; i++)
                free_irq(pci_irq_vector(dev->pdev, i), dev);
}

static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
        dev_dbg(&dev->pdev->dev, "enable interrupts\n");
        pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
        dev_dbg(&dev->pdev->dev, "disable interrupts\n");
        pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
        struct pci_dev *pdev = dev->pdev;
        int ret = 0, i;

        ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
                                    PCI_IRQ_MSIX);
        if (ret < 0) {
                ret = pci_alloc_irq_vectors(pdev, 1, 1,
                                            PCI_IRQ_MSI | PCI_IRQ_LEGACY);
                if (ret < 0)
                        return ret;
        }
        dev->nr_vectors = ret;

        ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
                          pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
        if (ret) {
                dev_err(&dev->pdev->dev,
                        "failed to request interrupt 0\n");
                goto out_free_vectors;
        }

        for (i = 1; i < dev->nr_vectors; i++) {
                ret = request_irq(pci_irq_vector(dev->pdev, i),
                                  i == 1 ?
                                  pvrdma_intr1_handler :
                                  pvrdma_intrx_handler,
                                  0, DRV_NAME, dev);
                if (ret) {
                        dev_err(&dev->pdev->dev,
                                "failed to request interrupt %d\n", i);
                        goto free_irqs;
                }
        }

        return 0;

free_irqs:
        while (--i >= 0)
                free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
        pci_free_irq_vectors(pdev);
        return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
        struct pci_dev *pdev = dev->pdev;

        if (dev->resp_slot)
                dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
                                  dev->dsr->resp_slot_dma);
        if (dev->cmd_slot)
                dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
                                  dev->dsr->cmd_slot_dma);
}

static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
                                   const union ib_gid *gid,
                                   u8 gid_type,
                                   int index)
{
        int ret;
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

        if (!dev->sgid_tbl) {
                dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
                return -EINVAL;
        }

        memset(cmd_bind, 0, sizeof(*cmd_bind));
        cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
        memcpy(cmd_bind->new_gid, gid->raw, 16);
        cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
        cmd_bind->vlan = 0xfff;
        cmd_bind->index = index;
        cmd_bind->gid_type = gid_type;

        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not create binding, error: %d\n", ret);
                return -EFAULT;
        }
        memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
        return 0;
}

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
        struct pvrdma_dev *dev = to_vdev(attr->device);

        return pvrdma_add_gid_at_index(dev, &attr->gid,
                                       ib_gid_type_to_pvrdma(attr->gid_type),
                                       attr->index);
}

static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
        int ret;
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

        /* Update sgid table. */
        if (!dev->sgid_tbl) {
                dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
                return -EINVAL;
        }

        memset(cmd_dest, 0, sizeof(*cmd_dest));
        cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
        memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
        cmd_dest->index = index;

        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not destroy binding, error: %d\n", ret);
                return ret;
        }
        memset(&dev->sgid_tbl[index], 0, 16);
        return 0;
}

static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
        struct pvrdma_dev *dev = to_vdev(attr->device);

        dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
                attr->index, dev->netdev->name);

        return pvrdma_del_gid_at_index(dev, attr->index);
}

static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
                                          struct net_device *ndev,
                                          unsigned long event)
{
        struct pci_dev *pdev_net;
        unsigned int slot;

        switch (event) {
        case NETDEV_REBOOT:
        case NETDEV_DOWN:
                pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
                break;
        case NETDEV_UP:
                pvrdma_write_reg(dev, PVRDMA_REG_CTL,
                                 PVRDMA_DEVICE_CTL_UNQUIESCE);

                mb();

                if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
                        dev_err(&dev->pdev->dev,
                                "failed to activate device during link up\n");
                else
                        pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
                break;
        case NETDEV_UNREGISTER:
                dev_put(dev->netdev);
                dev->netdev = NULL;
                break;
        case NETDEV_REGISTER:
                /* vmxnet3 will have same bus, slot. But func will be 0 */
                slot = PCI_SLOT(dev->pdev->devfn);
                pdev_net = pci_get_slot(dev->pdev->bus,
                                        PCI_DEVFN(slot, 0));
                if ((dev->netdev == NULL) &&
                    (pci_get_drvdata(pdev_net) == ndev)) {
                        /* this is our netdev */
                        dev->netdev = ndev;
                        dev_hold(ndev);
                }
                pci_dev_put(pdev_net);
                break;

        default:
                dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
                        event, dev_name(&dev->ib_dev.dev));
                break;
        }
}

static void pvrdma_netdevice_event_work(struct work_struct *work)
{
        struct pvrdma_netdevice_work *netdev_work;
        struct pvrdma_dev *dev;

        netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

        mutex_lock(&pvrdma_device_list_lock);
        list_for_each_entry(dev, &pvrdma_device_list, device_link) {
                if ((netdev_work->event == NETDEV_REGISTER) ||
                    (dev->netdev == netdev_work->event_netdev)) {
                        pvrdma_netdevice_event_handle(dev,
                                                      netdev_work->event_netdev,
                                                      netdev_work->event);
                        break;
                }
        }
        mutex_unlock(&pvrdma_device_list_lock);

        kfree(netdev_work);
}

static int pvrdma_netdevice_event(struct notifier_block *this,
                                  unsigned long event, void *ptr)
{
        struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
        struct pvrdma_netdevice_work *netdev_work;

        netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
        if (!netdev_work)
                return NOTIFY_BAD;

        INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
        netdev_work->event_netdev = event_netdev;
        netdev_work->event = event;
        queue_work(event_wq, &netdev_work->work);

        return NOTIFY_DONE;
}

static int pvrdma_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *id)
{
        struct pci_dev *pdev_net;
        struct pvrdma_dev *dev;
        int ret;
        unsigned long start;
        unsigned long len;
        dma_addr_t slot_dma = 0;

        dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

        /* Allocate zero-out device */
        dev = ib_alloc_device(pvrdma_dev, ib_dev);
        if (!dev) {
                dev_err(&pdev->dev, "failed to allocate IB device\n");
                return -ENOMEM;
        }

        mutex_lock(&pvrdma_device_list_lock);
        list_add(&dev->device_link, &pvrdma_device_list);
        mutex_unlock(&pvrdma_device_list_lock);

        ret = pvrdma_init_device(dev);
        if (ret)
                goto err_free_device;

        dev->pdev = pdev;
        pci_set_drvdata(pdev, dev);

        ret = pci_enable_device(pdev);
        if (ret) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto err_free_device;
        }

        dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
                pci_resource_flags(pdev, 0));
        dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
                (unsigned long long)pci_resource_len(pdev, 0));
        dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
                (unsigned long long)pci_resource_start(pdev, 0));
        dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
                pci_resource_flags(pdev, 1));
        dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
                (unsigned long long)pci_resource_len(pdev, 1));
        dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
                (unsigned long long)pci_resource_start(pdev, 1));

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
            !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
                ret = -ENOMEM;
                goto err_free_device;
        }

        ret = pci_request_regions(pdev, DRV_NAME);
        if (ret) {
                dev_err(&pdev->dev, "cannot request PCI resources\n");
                goto err_disable_pdev;
        }

        /* Enable 64-Bit DMA */
        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
                ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (ret != 0) {
                        dev_err(&pdev->dev,
                                "pci_set_consistent_dma_mask failed\n");
                        goto err_free_resource;
                }
        } else {
                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (ret != 0) {
                        dev_err(&pdev->dev,
                                "pci_set_dma_mask failed\n");
                        goto err_free_resource;
                }
        }

        pci_set_master(pdev);

        /* Map register space */
        start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
        len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
        dev->regs = ioremap(start, len);
        if (!dev->regs) {
                dev_err(&pdev->dev, "register mapping failed\n");
                ret = -ENOMEM;
                goto err_free_resource;
        }

        /* Setup per-device UAR. */
        dev->driver_uar.index = 0;
        dev->driver_uar.pfn =
                pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
                PAGE_SHIFT;
        dev->driver_uar.map =
                ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!dev->driver_uar.map) {
                dev_err(&pdev->dev, "failed to remap UAR pages\n");
                ret = -ENOMEM;
                goto err_unmap_regs;
        }

        dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
        dev_info(&pdev->dev, "device version %d, driver version %d\n",
                 dev->dsr_version, PVRDMA_VERSION);

        dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
                                      &dev->dsrbase, GFP_KERNEL);
        if (!dev->dsr) {
                dev_err(&pdev->dev, "failed to allocate shared region\n");
                ret = -ENOMEM;
                goto err_uar_unmap;
        }

        /* Setup the shared region */
        dev->dsr->driver_version = PVRDMA_VERSION;
        dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
                PVRDMA_GOS_BITS_32 :
                PVRDMA_GOS_BITS_64;
        dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
        dev->dsr->gos_info.gos_ver = 1;

        if (dev->dsr_version < PVRDMA_PPN64_VERSION)
                dev->dsr->uar_pfn = dev->driver_uar.pfn;
        else
                dev->dsr->uar_pfn64 = dev->driver_uar.pfn;

        /* Command slot. */
        dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
                                           &slot_dma, GFP_KERNEL);
        if (!dev->cmd_slot) {
                ret = -ENOMEM;
                goto err_free_dsr;
        }

        dev->dsr->cmd_slot_dma = (u64)slot_dma;

        /* Response slot. */
        dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
                                            &slot_dma, GFP_KERNEL);
        if (!dev->resp_slot) {
                ret = -ENOMEM;
                goto err_free_slots;
        }

        dev->dsr->resp_slot_dma = (u64)slot_dma;

        /* Async event ring */
        dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
        ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
                                   dev->dsr->async_ring_pages.num_pages, true);
        if (ret)
                goto err_free_slots;
        dev->async_ring_state = dev->async_pdir.pages[0];
        dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

        /* CQ notification ring */
        dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
        ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
                                   dev->dsr->cq_ring_pages.num_pages, true);
        if (ret)
                goto err_free_async_ring;
        dev->cq_ring_state = dev->cq_pdir.pages[0];
        dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

        /*
         * Write the PA of the shared region to the device. The writes must be
         * ordered such that the high bits are written last. When the writes
         * complete, the device will have filled out the capabilities.
         */

        pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
        pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
                         (u32)((u64)(dev->dsrbase) >> 32));

        /* Make sure the write is complete before reading status. */
        mb();

        /* The driver supports RoCE V1 and V2. */
        if (!PVRDMA_SUPPORTED(dev)) {
                dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
                ret = -EFAULT;
                goto err_free_cq_ring;
        }

        /* Paired vmxnet3 will have same bus, slot. But func will be 0 */
        pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
        if (!pdev_net) {
                dev_err(&pdev->dev, "failed to find paired net device\n");
                ret = -ENODEV;
                goto err_free_cq_ring;
        }

        if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
            pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
                dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
                pci_dev_put(pdev_net);
                ret = -ENODEV;
                goto err_free_cq_ring;
        }

        dev->netdev = pci_get_drvdata(pdev_net);
        pci_dev_put(pdev_net);
        if (!dev->netdev) {
                dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
                ret = -ENODEV;
                goto err_free_cq_ring;
        }
        dev_hold(dev->netdev);

        dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

        /* Interrupt setup */
        ret = pvrdma_alloc_intrs(dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to allocate interrupts\n");
                ret = -ENOMEM;
                goto err_free_cq_ring;
        }

        /* Allocate UAR table. */
        ret = pvrdma_uar_table_init(dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to allocate UAR table\n");
                ret = -ENOMEM;
                goto err_free_intrs;
        }

        /* Allocate GID table */
        dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
                                sizeof(union ib_gid), GFP_KERNEL);
        if (!dev->sgid_tbl) {
                ret = -ENOMEM;
                goto err_free_uar_table;
        }
        dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

        pvrdma_enable_intrs(dev);

        /* Activate pvrdma device */
        pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

        /* Make sure the write is complete before reading status. */
        mb();

        /* Check if device was successfully activated */
        ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
        if (ret != 0) {
                dev_err(&pdev->dev, "failed to activate device\n");
                ret = -EFAULT;
                goto err_disable_intr;
        }

        /* Register IB device */
        ret = pvrdma_register_device(dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to register IB device\n");
                goto err_disable_intr;
        }

        dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
        ret = register_netdevice_notifier(&dev->nb_netdev);
        if (ret) {
                dev_err(&pdev->dev, "failed to register netdevice events\n");
                goto err_unreg_ibdev;
        }

        dev_info(&pdev->dev, "attached to device\n");
        return 0;

err_unreg_ibdev:
        ib_unregister_device(&dev->ib_dev);
err_disable_intr:
        pvrdma_disable_intrs(dev);
        kfree(dev->sgid_tbl);
err_free_uar_table:
        pvrdma_uar_table_cleanup(dev);
err_free_intrs:
        pvrdma_free_irq(dev);
        pci_free_irq_vectors(pdev);
err_free_cq_ring:
        if (dev->netdev) {
                dev_put(dev->netdev);
                dev->netdev = NULL;
        }
        pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
        pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
        pvrdma_free_slots(dev);
err_free_dsr:
        dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
                          dev->dsrbase);
err_uar_unmap:
        iounmap(dev->driver_uar.map);
err_unmap_regs:
        iounmap(dev->regs);
err_free_resource:
        pci_release_regions(pdev);
err_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
err_free_device:
        mutex_lock(&pvrdma_device_list_lock);
        list_del(&dev->device_link);
        mutex_unlock(&pvrdma_device_list_lock);
        ib_dealloc_device(&dev->ib_dev);
        return ret;
}

static void pvrdma_pci_remove(struct pci_dev *pdev)
{
        struct pvrdma_dev *dev = pci_get_drvdata(pdev);

        if (!dev)
                return;

        dev_info(&pdev->dev, "detaching from device\n");

        unregister_netdevice_notifier(&dev->nb_netdev);
        dev->nb_netdev.notifier_call = NULL;

        flush_workqueue(event_wq);

        if (dev->netdev) {
                dev_put(dev->netdev);
                dev->netdev = NULL;
        }

        /* Unregister ib device */
        ib_unregister_device(&dev->ib_dev);

        mutex_lock(&pvrdma_device_list_lock);
        list_del(&dev->device_link);
        mutex_unlock(&pvrdma_device_list_lock);

        pvrdma_disable_intrs(dev);
        pvrdma_free_irq(dev);
        pci_free_irq_vectors(pdev);

        /* Deactivate pvrdma device */
        pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
        pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
        pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
        pvrdma_free_slots(dev);

        iounmap(dev->regs);
        kfree(dev->sgid_tbl);
        kfree(dev->cq_tbl);
        kfree(dev->srq_tbl);
        kfree(dev->qp_tbl);
        pvrdma_uar_table_cleanup(dev);
        iounmap(dev->driver_uar.map);

        ib_dealloc_device(&dev->ib_dev);

        /* Free pci resources */
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
        { 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
        .name = DRV_NAME,
        .id_table = pvrdma_pci_table,
        .probe = pvrdma_pci_probe,
        .remove = pvrdma_pci_remove,
};

static int __init pvrdma_init(void)
{
        int err;

        event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
        if (!event_wq)
                return -ENOMEM;

        err = pci_register_driver(&pvrdma_driver);
        if (err)
                destroy_workqueue(event_wq);

        return err;
}

static void __exit pvrdma_cleanup(void)
{
        pci_unregister_driver(&pvrdma_driver);

        destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");