/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void             *data;
};

struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex protects access to both device_list and client_list.
 * There's no real point to using multiple locks or something fancier
 * like an rwsem: we always access both lists, and we're always
 * modifying one list or the other list.  In any case this is not a
 * hot path so there's no point in trying to optimize.
 */
static DEFINE_MUTEX(device_mutex);
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
			       device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}

/*
 * Assign a unique unit number to a device name template containing a
 * single "%d", using a one-page bitmap of the unit numbers already
 * taken by registered devices.
 */
static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}

/**
 * ib_alloc_device - allocate an IB device struct
 * @size: size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	BUG_ON(size < sizeof (struct ib_device));

	return kzalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(ib_alloc_device);
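/*
 * Illustrative sketch (not part of this file, not compiled): low-level
 * drivers typically embed struct ib_device in a larger private structure
 * and hand the total size to ib_alloc_device().  The "mydrv" names are
 * hypothetical.
 */
#if 0
struct mydrv_dev {
	struct ib_device ibdev;		/* core device state */
	spinlock_t       state_lock;	/* driver-private state */
};

static struct mydrv_dev *mydrv_alloc(void)
{
	/* One allocation covers ib_device plus the private fields. */
	struct ib_device *ibdev = ib_alloc_device(sizeof(struct mydrv_dev));

	return ibdev ? container_of(ibdev, struct mydrv_dev, ibdev) : NULL;
}
#endif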
/**
 * ib_dealloc_device - free an IB device struct
 * @device: structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	if (device->reg_state == IB_DEV_UNINITIALIZED) {
		kfree(device);
		return;
	}

	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);

	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
		       device->name, client->name);
		return -ENOMEM;
	}

	context->client = client;
	context->data   = NULL;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
		       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret = -ENOMEM;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/*
	 * device->port_immutable is indexed directly by the port number to
	 * make access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1-based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		goto err;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			goto err;

		if (verify_immutable(device, port)) {
			ret = -EINVAL;
			goto err;
		}
	}

	ret = 0;
	goto out;
err:
	kfree(device->port_immutable);
out:
	return ret;
}
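/*
 * Illustrative sketch (not part of this file, not compiled): the shape
 * of a driver's mandatory get_port_immutable() hook for an IB port,
 * modeled on the in-tree HCA drivers.  Field names follow struct
 * ib_port_immutable; "mydrv" is hypothetical.
 */
#if 0
static int mydrv_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	/* Table sizes are fixed for the life of the device. */
	immutable->pkey_tbl_len   = attr.pkey_tbl_len;
	immutable->gid_tbl_len    = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	immutable->max_mad_size   = IB_MGMT_MAD_SIZE;

	return 0;
}
#endif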
/**
 * ib_register_device - Register an IB device with IB core
 * @device: Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added.  @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	INIT_LIST_HEAD(&device->event_handler_list);
	INIT_LIST_HEAD(&device->client_data_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);

	ret = read_port_immutable(device);
	if (ret) {
		printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
		       device->name);
		goto out;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		printk(KERN_WARNING "Couldn't register device %s with driver model\n",
		       device->name);
		kfree(device->port_immutable);
		goto out;
	}

	list_add_tail(&device->core_list, &device_list);

	device->reg_state = IB_DEV_REGISTERED;

	{
		struct ib_client *client;

		list_for_each_entry(client, &client_list, list)
			if (client->add && !add_client_context(device, client))
				client->add(device);
	}

out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);

/**
 * ib_unregister_device - Unregister an IB device
 * @device: Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client *client;
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	list_for_each_entry_reverse(client, &client_list, list)
		if (client->remove)
			client->remove(device);

	list_del(&device->core_list);

	mutex_unlock(&device_mutex);

	ib_device_unregister_sysfs(device);

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);
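/*
 * Illustrative sketch (not part of this file, not compiled): a driver's
 * probe path, showing the "%d" name template consumed by alloc_name()
 * above.  "mydrv" names are hypothetical and build on the earlier
 * mydrv_alloc() sketch.
 */
#if 0
static int mydrv_probe(void)
{
	struct mydrv_dev *dev = mydrv_alloc();
	int ret;

	if (!dev)
		return -ENOMEM;

	/* The core replaces "%d" with the first free unit number. */
	strlcpy(dev->ibdev.name, "mydrv%d", IB_DEVICE_NAME_MAX);

	/* ... fill in the mandatory methods checked above ... */

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		ib_dealloc_device(&dev->ibdev);
	return ret;
}
#endif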
/**
 * ib_register_client - Register an IB client
 * @client: Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_add_tail(&client->list, &client_list);
	list_for_each_entry(device, &device_list, core_list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);

/**
 * ib_unregister_client - Unregister an IB client
 * @client: Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list) {
		if (client->remove)
			client->remove(device);

		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				list_del(&context->list);
				kfree(context);
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
	}
	list_del(&client->list);

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device: Device to set context for
 * @client: Client to set context for
 * @data: Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	printk(KERN_WARNING "No client context found for %s/%s\n",
	       device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
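/*
 * Illustrative sketch (not part of this file, not compiled): a minimal
 * client pairing ib_register_client() with per-device context via
 * ib_set_client_data()/ib_get_client_data().  All "example" names are
 * hypothetical.
 */
#if 0
struct example_ctx {
	int state;			/* per-device client state */
};

static struct ib_client example_client;

static void example_add_one(struct ib_device *device)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return;
	/* Stash per-device state for later ib_get_client_data() calls. */
	ib_set_client_data(device, &example_client, ctx);
}

static void example_remove_one(struct ib_device *device)
{
	kfree(ib_get_client_data(device, &example_client));
}

static struct ib_client example_client = {
	.name   = "example",
	.add    = example_add_one,
	.remove = example_remove_one
};

/*
 * In module init: ib_register_client(&example_client); devices that are
 * already registered immediately get example_add_one() callbacks.
 */
#endif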
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler: Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler: Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event: Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
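/*
 * Illustrative sketch (not part of this file, not compiled): registering
 * for async events with the INIT_IB_EVENT_HANDLER() helper from
 * <rdma/ib_verbs.h>.  Because ib_dispatch_event() may run in interrupt
 * context with event_handler_lock held, the handler must not sleep.
 * The "example" names are hypothetical.
 */
#if 0
static void example_event_handler(struct ib_event_handler *unused,
				  struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE)
		pr_info("%s port %d became active\n",
			event->device->name, event->element.port_num);
}

/* Typically called from a client's add callback: */
static void example_enable_events(struct ib_device *device,
				  struct ib_event_handler *handler)
{
	INIT_IB_EVENT_HANDLER(handler, device, example_event_handler);
	ib_register_event_handler(handler);
}
#endif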
/**
 * ib_query_device - Query IB device attributes
 * @device: Device to query
 * @device_attr: Device attributes
 *
 * ib_query_device() returns the attributes of a device through the
 * @device_attr pointer.
 */
int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr)
{
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};

	memset(device_attr, 0, sizeof(*device_attr));

	return device->query_device(device, device_attr, &uhw);
}
EXPORT_SYMBOL(ib_query_device);

/**
 * ib_query_port - Query IB port attributes
 * @device: Device to query
 * @port_num: Port number to query
 * @port_attr: Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);

/**
 * ib_query_gid - Get GID table entry
 * @device: Device to query
 * @port_num: Port number to query
 * @index: GID table index to query
 * @gid: Returned GID
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid)
{
	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_query_pkey - Get P_Key table entry
 * @device: Device to query
 * @port_num: Port number to query
 * @index: P_Key table index to query
 * @pkey: Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device: Device to modify
 * @device_modify_mask: Mask of attributes to change
 * @device_modify: New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	if (!device->modify_port)
		return -ENOSYS;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
				   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
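/*
 * Illustrative sketch (not part of this file, not compiled): locating
 * the local port that owns a given GID, e.g. when demultiplexing an
 * incoming request.  "example_locate_gid" is a hypothetical name.
 */
#if 0
static void example_locate_gid(struct ib_device *device, union ib_gid *gid)
{
	u8 port_num;
	u16 index;

	if (!ib_find_gid(device, gid, &port_num, &index))
		pr_debug("%s: GID found at port %u, table index %u\n",
			 device->name, port_num, index);
}
#endif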
/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* If there is a full-member pkey, take it. */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* No full-member pkey found; take a limited-member one if it exists. */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ret = ib_sysfs_setup();
	if (ret) {
		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
		goto err;
	}

	ret = ibnl_init();
	if (ret) {
		printk(KERN_WARNING "Couldn't init IB netlink interface\n");
		goto err_sysfs;
	}

	ret = ib_cache_setup();
	if (ret) {
		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
		goto err_nl;
	}

	return 0;

err_nl:
	ibnl_cleanup();

err_sysfs:
	ib_sysfs_cleanup();

err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	ibnl_cleanup();
	ib_sysfs_cleanup();
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);