1 /* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <linux/module.h> 35 #include <linux/string.h> 36 #include <linux/errno.h> 37 #include <linux/kernel.h> 38 #include <linux/slab.h> 39 #include <linux/init.h> 40 #include <linux/mutex.h> 41 #include <linux/netdevice.h> 42 #include <linux/security.h> 43 #include <linux/notifier.h> 44 #include <rdma/rdma_netlink.h> 45 #include <rdma/ib_addr.h> 46 #include <rdma/ib_cache.h> 47 48 #include "core_priv.h" 49 50 MODULE_AUTHOR("Roland Dreier"); 51 MODULE_DESCRIPTION("core kernel InfiniBand API"); 52 MODULE_LICENSE("Dual BSD/GPL"); 53 54 struct ib_client_data { 55 struct list_head list; 56 struct ib_client *client; 57 void * data; 58 /* The device or client is going down. Do not call client or device 59 * callbacks other than remove(). */ 60 bool going_down; 61 }; 62 63 struct workqueue_struct *ib_comp_wq; 64 struct workqueue_struct *ib_wq; 65 EXPORT_SYMBOL_GPL(ib_wq); 66 67 /* The device_list and client_list contain devices and clients after their 68 * registration has completed, and the devices and clients are removed 69 * during unregistration. */ 70 static LIST_HEAD(device_list); 71 static LIST_HEAD(client_list); 72 73 /* 74 * device_mutex and lists_rwsem protect access to both device_list and 75 * client_list. device_mutex protects writer access by device and client 76 * registration / de-registration. lists_rwsem protects reader access to 77 * these lists. Iterators of these lists must lock it for read, while updates 78 * to the lists must be done with a write lock. A special case is when the 79 * device_mutex is locked. In this case locking the lists for read access is 80 * not necessary as the device_mutex implies it. 81 * 82 * lists_rwsem also protects access to the client data list. 83 */ 84 static DEFINE_MUTEX(device_mutex); 85 static DECLARE_RWSEM(lists_rwsem); 86 87 static int ib_security_change(struct notifier_block *nb, unsigned long event, 88 void *lsm_data); 89 static void ib_policy_change_task(struct work_struct *work); 90 static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task); 91 92 static struct notifier_block ibdev_lsm_nb = { 93 .notifier_call = ib_security_change, 94 }; 95 96 static int ib_device_check_mandatory(struct ib_device *device) 97 { 98 #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x } 99 static const struct { 100 size_t offset; 101 char *name; 102 } mandatory_table[] = { 103 IB_MANDATORY_FUNC(query_device), 104 IB_MANDATORY_FUNC(query_port), 105 IB_MANDATORY_FUNC(query_pkey), 106 IB_MANDATORY_FUNC(query_gid), 107 IB_MANDATORY_FUNC(alloc_pd), 108 IB_MANDATORY_FUNC(dealloc_pd), 109 IB_MANDATORY_FUNC(create_ah), 110 IB_MANDATORY_FUNC(destroy_ah), 111 IB_MANDATORY_FUNC(create_qp), 112 IB_MANDATORY_FUNC(modify_qp), 113 IB_MANDATORY_FUNC(destroy_qp), 114 IB_MANDATORY_FUNC(post_send), 115 IB_MANDATORY_FUNC(post_recv), 116 IB_MANDATORY_FUNC(create_cq), 117 IB_MANDATORY_FUNC(destroy_cq), 118 IB_MANDATORY_FUNC(poll_cq), 119 IB_MANDATORY_FUNC(req_notify_cq), 120 IB_MANDATORY_FUNC(get_dma_mr), 121 IB_MANDATORY_FUNC(dereg_mr), 122 IB_MANDATORY_FUNC(get_port_immutable) 123 }; 124 int i; 125 126 for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) { 127 if (!*(void **) ((void *) device + mandatory_table[i].offset)) { 128 pr_warn("Device %s is missing mandatory function %s\n", 129 device->name, mandatory_table[i].name); 130 return -EINVAL; 131 } 132 } 133 134 return 0; 135 } 136 137 static struct ib_device *__ib_device_get_by_index(u32 index) 138 { 139 struct ib_device *device; 140 141 list_for_each_entry(device, &device_list, core_list) 142 if (device->index == index) 143 return device; 144 145 return NULL; 146 } 147 148 /* 149 * Caller is responsible to return refrerence count by calling put_device() 150 */ 151 struct ib_device *ib_device_get_by_index(u32 index) 152 { 153 struct ib_device *device; 154 155 down_read(&lists_rwsem); 156 device = __ib_device_get_by_index(index); 157 if (device) 158 get_device(&device->dev); 159 160 up_read(&lists_rwsem); 161 return device; 162 } 163 164 static struct ib_device *__ib_device_get_by_name(const char *name) 165 { 166 struct ib_device *device; 167 168 list_for_each_entry(device, &device_list, core_list) 169 if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX)) 170 return device; 171 172 return NULL; 173 } 174 175 static int alloc_name(char *name) 176 { 177 unsigned long *inuse; 178 char buf[IB_DEVICE_NAME_MAX]; 179 struct ib_device *device; 180 int i; 181 182 inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL); 183 if (!inuse) 184 return -ENOMEM; 185 186 list_for_each_entry(device, &device_list, core_list) { 187 if (!sscanf(device->name, name, &i)) 188 continue; 189 if (i < 0 || i >= PAGE_SIZE * 8) 190 continue; 191 snprintf(buf, sizeof buf, name, i); 192 if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX)) 193 set_bit(i, inuse); 194 } 195 196 i = find_first_zero_bit(inuse, PAGE_SIZE * 8); 197 free_page((unsigned long) inuse); 198 snprintf(buf, sizeof buf, name, i); 199 200 if (__ib_device_get_by_name(buf)) 201 return -ENFILE; 202 203 strlcpy(name, buf, IB_DEVICE_NAME_MAX); 204 return 0; 205 } 206 207 static void ib_device_release(struct device *device) 208 { 209 struct ib_device *dev = container_of(device, struct ib_device, dev); 210 211 WARN_ON(dev->reg_state == IB_DEV_REGISTERED); 212 if (dev->reg_state == IB_DEV_UNREGISTERED) { 213 /* 214 * In IB_DEV_UNINITIALIZED state, cache or port table 215 * is not even created. Free cache and port table only when 216 * device reaches UNREGISTERED state. 217 */ 218 ib_cache_release_one(dev); 219 kfree(dev->port_immutable); 220 } 221 kfree(dev); 222 } 223 224 static int ib_device_uevent(struct device *device, 225 struct kobj_uevent_env *env) 226 { 227 struct ib_device *dev = container_of(device, struct ib_device, dev); 228 229 if (add_uevent_var(env, "NAME=%s", dev->name)) 230 return -ENOMEM; 231 232 /* 233 * It would be nice to pass the node GUID with the event... 234 */ 235 236 return 0; 237 } 238 239 static struct class ib_class = { 240 .name = "infiniband", 241 .dev_release = ib_device_release, 242 .dev_uevent = ib_device_uevent, 243 }; 244 245 /** 246 * ib_alloc_device - allocate an IB device struct 247 * @size:size of structure to allocate 248 * 249 * Low-level drivers should use ib_alloc_device() to allocate &struct 250 * ib_device. @size is the size of the structure to be allocated, 251 * including any private data used by the low-level driver. 252 * ib_dealloc_device() must be used to free structures allocated with 253 * ib_alloc_device(). 254 */ 255 struct ib_device *ib_alloc_device(size_t size) 256 { 257 struct ib_device *device; 258 259 if (WARN_ON(size < sizeof(struct ib_device))) 260 return NULL; 261 262 device = kzalloc(size, GFP_KERNEL); 263 if (!device) 264 return NULL; 265 266 device->dev.class = &ib_class; 267 device_initialize(&device->dev); 268 269 dev_set_drvdata(&device->dev, device); 270 271 INIT_LIST_HEAD(&device->event_handler_list); 272 spin_lock_init(&device->event_handler_lock); 273 spin_lock_init(&device->client_data_lock); 274 INIT_LIST_HEAD(&device->client_data_list); 275 INIT_LIST_HEAD(&device->port_list); 276 277 return device; 278 } 279 EXPORT_SYMBOL(ib_alloc_device); 280 281 /** 282 * ib_dealloc_device - free an IB device struct 283 * @device:structure to free 284 * 285 * Free a structure allocated with ib_alloc_device(). 286 */ 287 void ib_dealloc_device(struct ib_device *device) 288 { 289 WARN_ON(device->reg_state != IB_DEV_UNREGISTERED && 290 device->reg_state != IB_DEV_UNINITIALIZED); 291 kobject_put(&device->dev.kobj); 292 } 293 EXPORT_SYMBOL(ib_dealloc_device); 294 295 static int add_client_context(struct ib_device *device, struct ib_client *client) 296 { 297 struct ib_client_data *context; 298 unsigned long flags; 299 300 context = kmalloc(sizeof *context, GFP_KERNEL); 301 if (!context) 302 return -ENOMEM; 303 304 context->client = client; 305 context->data = NULL; 306 context->going_down = false; 307 308 down_write(&lists_rwsem); 309 spin_lock_irqsave(&device->client_data_lock, flags); 310 list_add(&context->list, &device->client_data_list); 311 spin_unlock_irqrestore(&device->client_data_lock, flags); 312 up_write(&lists_rwsem); 313 314 return 0; 315 } 316 317 static int verify_immutable(const struct ib_device *dev, u8 port) 318 { 319 return WARN_ON(!rdma_cap_ib_mad(dev, port) && 320 rdma_max_mad_size(dev, port) != 0); 321 } 322 323 static int read_port_immutable(struct ib_device *device) 324 { 325 int ret; 326 u8 start_port = rdma_start_port(device); 327 u8 end_port = rdma_end_port(device); 328 u8 port; 329 330 /** 331 * device->port_immutable is indexed directly by the port number to make 332 * access to this data as efficient as possible. 333 * 334 * Therefore port_immutable is declared as a 1 based array with 335 * potential empty slots at the beginning. 336 */ 337 device->port_immutable = kzalloc(sizeof(*device->port_immutable) 338 * (end_port + 1), 339 GFP_KERNEL); 340 if (!device->port_immutable) 341 return -ENOMEM; 342 343 for (port = start_port; port <= end_port; ++port) { 344 ret = device->get_port_immutable(device, port, 345 &device->port_immutable[port]); 346 if (ret) 347 return ret; 348 349 if (verify_immutable(device, port)) 350 return -EINVAL; 351 } 352 return 0; 353 } 354 355 void ib_get_device_fw_str(struct ib_device *dev, char *str) 356 { 357 if (dev->get_dev_fw_str) 358 dev->get_dev_fw_str(dev, str); 359 else 360 str[0] = '\0'; 361 } 362 EXPORT_SYMBOL(ib_get_device_fw_str); 363 364 static int setup_port_pkey_list(struct ib_device *device) 365 { 366 int i; 367 368 /** 369 * device->port_pkey_list is indexed directly by the port number, 370 * Therefore it is declared as a 1 based array with potential empty 371 * slots at the beginning. 372 */ 373 device->port_pkey_list = kcalloc(rdma_end_port(device) + 1, 374 sizeof(*device->port_pkey_list), 375 GFP_KERNEL); 376 377 if (!device->port_pkey_list) 378 return -ENOMEM; 379 380 for (i = 0; i < (rdma_end_port(device) + 1); i++) { 381 spin_lock_init(&device->port_pkey_list[i].list_lock); 382 INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list); 383 } 384 385 return 0; 386 } 387 388 static void ib_policy_change_task(struct work_struct *work) 389 { 390 struct ib_device *dev; 391 392 down_read(&lists_rwsem); 393 list_for_each_entry(dev, &device_list, core_list) { 394 int i; 395 396 for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) { 397 u64 sp; 398 int ret = ib_get_cached_subnet_prefix(dev, 399 i, 400 &sp); 401 402 WARN_ONCE(ret, 403 "ib_get_cached_subnet_prefix err: %d, this should never happen here\n", 404 ret); 405 if (!ret) 406 ib_security_cache_change(dev, i, sp); 407 } 408 } 409 up_read(&lists_rwsem); 410 } 411 412 static int ib_security_change(struct notifier_block *nb, unsigned long event, 413 void *lsm_data) 414 { 415 if (event != LSM_POLICY_CHANGE) 416 return NOTIFY_DONE; 417 418 schedule_work(&ib_policy_change_work); 419 420 return NOTIFY_OK; 421 } 422 423 /** 424 * __dev_new_index - allocate an device index 425 * 426 * Returns a suitable unique value for a new device interface 427 * number. It assumes that there are less than 2^32-1 ib devices 428 * will be present in the system. 429 */ 430 static u32 __dev_new_index(void) 431 { 432 /* 433 * The device index to allow stable naming. 434 * Similar to struct net -> ifindex. 435 */ 436 static u32 index; 437 438 for (;;) { 439 if (!(++index)) 440 index = 1; 441 442 if (!__ib_device_get_by_index(index)) 443 return index; 444 } 445 } 446 447 /** 448 * ib_register_device - Register an IB device with IB core 449 * @device:Device to register 450 * 451 * Low-level drivers use ib_register_device() to register their 452 * devices with the IB core. All registered clients will receive a 453 * callback for each device that is added. @device must be allocated 454 * with ib_alloc_device(). 455 */ 456 int ib_register_device(struct ib_device *device, 457 int (*port_callback)(struct ib_device *, 458 u8, struct kobject *)) 459 { 460 int ret; 461 struct ib_client *client; 462 struct ib_udata uhw = {.outlen = 0, .inlen = 0}; 463 struct device *parent = device->dev.parent; 464 465 WARN_ON_ONCE(!parent); 466 WARN_ON_ONCE(device->dma_device); 467 if (device->dev.dma_ops) { 468 /* 469 * The caller provided custom DMA operations. Copy the 470 * DMA-related fields that are used by e.g. dma_alloc_coherent() 471 * into device->dev. 472 */ 473 device->dma_device = &device->dev; 474 if (!device->dev.dma_mask) 475 device->dev.dma_mask = parent->dma_mask; 476 if (!device->dev.coherent_dma_mask) 477 device->dev.coherent_dma_mask = 478 parent->coherent_dma_mask; 479 } else { 480 /* 481 * The caller did not provide custom DMA operations. Use the 482 * DMA mapping operations of the parent device. 483 */ 484 device->dma_device = parent; 485 } 486 487 mutex_lock(&device_mutex); 488 489 if (strchr(device->name, '%')) { 490 ret = alloc_name(device->name); 491 if (ret) 492 goto out; 493 } 494 495 if (ib_device_check_mandatory(device)) { 496 ret = -EINVAL; 497 goto out; 498 } 499 500 ret = read_port_immutable(device); 501 if (ret) { 502 pr_warn("Couldn't create per port immutable data %s\n", 503 device->name); 504 goto out; 505 } 506 507 ret = setup_port_pkey_list(device); 508 if (ret) { 509 pr_warn("Couldn't create per port_pkey_list\n"); 510 goto out; 511 } 512 513 ret = ib_cache_setup_one(device); 514 if (ret) { 515 pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n"); 516 goto port_cleanup; 517 } 518 519 ret = ib_device_register_rdmacg(device); 520 if (ret) { 521 pr_warn("Couldn't register device with rdma cgroup\n"); 522 goto cache_cleanup; 523 } 524 525 memset(&device->attrs, 0, sizeof(device->attrs)); 526 ret = device->query_device(device, &device->attrs, &uhw); 527 if (ret) { 528 pr_warn("Couldn't query the device attributes\n"); 529 goto cache_cleanup; 530 } 531 532 ret = ib_device_register_sysfs(device, port_callback); 533 if (ret) { 534 pr_warn("Couldn't register device %s with driver model\n", 535 device->name); 536 goto cache_cleanup; 537 } 538 539 device->reg_state = IB_DEV_REGISTERED; 540 541 list_for_each_entry(client, &client_list, list) 542 if (!add_client_context(device, client) && client->add) 543 client->add(device); 544 545 device->index = __dev_new_index(); 546 down_write(&lists_rwsem); 547 list_add_tail(&device->core_list, &device_list); 548 up_write(&lists_rwsem); 549 mutex_unlock(&device_mutex); 550 return 0; 551 552 cache_cleanup: 553 ib_cache_cleanup_one(device); 554 ib_cache_release_one(device); 555 port_cleanup: 556 kfree(device->port_immutable); 557 out: 558 mutex_unlock(&device_mutex); 559 return ret; 560 } 561 EXPORT_SYMBOL(ib_register_device); 562 563 /** 564 * ib_unregister_device - Unregister an IB device 565 * @device:Device to unregister 566 * 567 * Unregister an IB device. All clients will receive a remove callback. 568 */ 569 void ib_unregister_device(struct ib_device *device) 570 { 571 struct ib_client_data *context, *tmp; 572 unsigned long flags; 573 574 mutex_lock(&device_mutex); 575 576 down_write(&lists_rwsem); 577 list_del(&device->core_list); 578 spin_lock_irqsave(&device->client_data_lock, flags); 579 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) 580 context->going_down = true; 581 spin_unlock_irqrestore(&device->client_data_lock, flags); 582 downgrade_write(&lists_rwsem); 583 584 list_for_each_entry_safe(context, tmp, &device->client_data_list, 585 list) { 586 if (context->client->remove) 587 context->client->remove(device, context->data); 588 } 589 up_read(&lists_rwsem); 590 591 ib_device_unregister_rdmacg(device); 592 ib_device_unregister_sysfs(device); 593 594 mutex_unlock(&device_mutex); 595 596 ib_cache_cleanup_one(device); 597 598 ib_security_destroy_port_pkey_list(device); 599 kfree(device->port_pkey_list); 600 601 down_write(&lists_rwsem); 602 spin_lock_irqsave(&device->client_data_lock, flags); 603 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) 604 kfree(context); 605 spin_unlock_irqrestore(&device->client_data_lock, flags); 606 up_write(&lists_rwsem); 607 608 device->reg_state = IB_DEV_UNREGISTERED; 609 } 610 EXPORT_SYMBOL(ib_unregister_device); 611 612 /** 613 * ib_register_client - Register an IB client 614 * @client:Client to register 615 * 616 * Upper level users of the IB drivers can use ib_register_client() to 617 * register callbacks for IB device addition and removal. When an IB 618 * device is added, each registered client's add method will be called 619 * (in the order the clients were registered), and when a device is 620 * removed, each client's remove method will be called (in the reverse 621 * order that clients were registered). In addition, when 622 * ib_register_client() is called, the client will receive an add 623 * callback for all devices already registered. 624 */ 625 int ib_register_client(struct ib_client *client) 626 { 627 struct ib_device *device; 628 629 mutex_lock(&device_mutex); 630 631 list_for_each_entry(device, &device_list, core_list) 632 if (!add_client_context(device, client) && client->add) 633 client->add(device); 634 635 down_write(&lists_rwsem); 636 list_add_tail(&client->list, &client_list); 637 up_write(&lists_rwsem); 638 639 mutex_unlock(&device_mutex); 640 641 return 0; 642 } 643 EXPORT_SYMBOL(ib_register_client); 644 645 /** 646 * ib_unregister_client - Unregister an IB client 647 * @client:Client to unregister 648 * 649 * Upper level users use ib_unregister_client() to remove their client 650 * registration. When ib_unregister_client() is called, the client 651 * will receive a remove callback for each IB device still registered. 652 */ 653 void ib_unregister_client(struct ib_client *client) 654 { 655 struct ib_client_data *context, *tmp; 656 struct ib_device *device; 657 unsigned long flags; 658 659 mutex_lock(&device_mutex); 660 661 down_write(&lists_rwsem); 662 list_del(&client->list); 663 up_write(&lists_rwsem); 664 665 list_for_each_entry(device, &device_list, core_list) { 666 struct ib_client_data *found_context = NULL; 667 668 down_write(&lists_rwsem); 669 spin_lock_irqsave(&device->client_data_lock, flags); 670 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) 671 if (context->client == client) { 672 context->going_down = true; 673 found_context = context; 674 break; 675 } 676 spin_unlock_irqrestore(&device->client_data_lock, flags); 677 up_write(&lists_rwsem); 678 679 if (client->remove) 680 client->remove(device, found_context ? 681 found_context->data : NULL); 682 683 if (!found_context) { 684 pr_warn("No client context found for %s/%s\n", 685 device->name, client->name); 686 continue; 687 } 688 689 down_write(&lists_rwsem); 690 spin_lock_irqsave(&device->client_data_lock, flags); 691 list_del(&found_context->list); 692 kfree(found_context); 693 spin_unlock_irqrestore(&device->client_data_lock, flags); 694 up_write(&lists_rwsem); 695 } 696 697 mutex_unlock(&device_mutex); 698 } 699 EXPORT_SYMBOL(ib_unregister_client); 700 701 /** 702 * ib_get_client_data - Get IB client context 703 * @device:Device to get context for 704 * @client:Client to get context for 705 * 706 * ib_get_client_data() returns client context set with 707 * ib_set_client_data(). 708 */ 709 void *ib_get_client_data(struct ib_device *device, struct ib_client *client) 710 { 711 struct ib_client_data *context; 712 void *ret = NULL; 713 unsigned long flags; 714 715 spin_lock_irqsave(&device->client_data_lock, flags); 716 list_for_each_entry(context, &device->client_data_list, list) 717 if (context->client == client) { 718 ret = context->data; 719 break; 720 } 721 spin_unlock_irqrestore(&device->client_data_lock, flags); 722 723 return ret; 724 } 725 EXPORT_SYMBOL(ib_get_client_data); 726 727 /** 728 * ib_set_client_data - Set IB client context 729 * @device:Device to set context for 730 * @client:Client to set context for 731 * @data:Context to set 732 * 733 * ib_set_client_data() sets client context that can be retrieved with 734 * ib_get_client_data(). 735 */ 736 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 737 void *data) 738 { 739 struct ib_client_data *context; 740 unsigned long flags; 741 742 spin_lock_irqsave(&device->client_data_lock, flags); 743 list_for_each_entry(context, &device->client_data_list, list) 744 if (context->client == client) { 745 context->data = data; 746 goto out; 747 } 748 749 pr_warn("No client context found for %s/%s\n", 750 device->name, client->name); 751 752 out: 753 spin_unlock_irqrestore(&device->client_data_lock, flags); 754 } 755 EXPORT_SYMBOL(ib_set_client_data); 756 757 /** 758 * ib_register_event_handler - Register an IB event handler 759 * @event_handler:Handler to register 760 * 761 * ib_register_event_handler() registers an event handler that will be 762 * called back when asynchronous IB events occur (as defined in 763 * chapter 11 of the InfiniBand Architecture Specification). This 764 * callback may occur in interrupt context. 765 */ 766 void ib_register_event_handler(struct ib_event_handler *event_handler) 767 { 768 unsigned long flags; 769 770 spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); 771 list_add_tail(&event_handler->list, 772 &event_handler->device->event_handler_list); 773 spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); 774 } 775 EXPORT_SYMBOL(ib_register_event_handler); 776 777 /** 778 * ib_unregister_event_handler - Unregister an event handler 779 * @event_handler:Handler to unregister 780 * 781 * Unregister an event handler registered with 782 * ib_register_event_handler(). 783 */ 784 void ib_unregister_event_handler(struct ib_event_handler *event_handler) 785 { 786 unsigned long flags; 787 788 spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); 789 list_del(&event_handler->list); 790 spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); 791 } 792 EXPORT_SYMBOL(ib_unregister_event_handler); 793 794 /** 795 * ib_dispatch_event - Dispatch an asynchronous event 796 * @event:Event to dispatch 797 * 798 * Low-level drivers must call ib_dispatch_event() to dispatch the 799 * event to all registered event handlers when an asynchronous event 800 * occurs. 801 */ 802 void ib_dispatch_event(struct ib_event *event) 803 { 804 unsigned long flags; 805 struct ib_event_handler *handler; 806 807 spin_lock_irqsave(&event->device->event_handler_lock, flags); 808 809 list_for_each_entry(handler, &event->device->event_handler_list, list) 810 handler->handler(handler, event); 811 812 spin_unlock_irqrestore(&event->device->event_handler_lock, flags); 813 } 814 EXPORT_SYMBOL(ib_dispatch_event); 815 816 /** 817 * ib_query_port - Query IB port attributes 818 * @device:Device to query 819 * @port_num:Port number to query 820 * @port_attr:Port attributes 821 * 822 * ib_query_port() returns the attributes of a port through the 823 * @port_attr pointer. 824 */ 825 int ib_query_port(struct ib_device *device, 826 u8 port_num, 827 struct ib_port_attr *port_attr) 828 { 829 union ib_gid gid; 830 int err; 831 832 if (!rdma_is_port_valid(device, port_num)) 833 return -EINVAL; 834 835 memset(port_attr, 0, sizeof(*port_attr)); 836 err = device->query_port(device, port_num, port_attr); 837 if (err || port_attr->subnet_prefix) 838 return err; 839 840 if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) 841 return 0; 842 843 err = ib_query_gid(device, port_num, 0, &gid, NULL); 844 if (err) 845 return err; 846 847 port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix); 848 return 0; 849 } 850 EXPORT_SYMBOL(ib_query_port); 851 852 /** 853 * ib_query_gid - Get GID table entry 854 * @device:Device to query 855 * @port_num:Port number to query 856 * @index:GID table index to query 857 * @gid:Returned GID 858 * @attr: Returned GID attributes related to this GID index (only in RoCE). 859 * NULL means ignore. 860 * 861 * ib_query_gid() fetches the specified GID table entry. 862 */ 863 int ib_query_gid(struct ib_device *device, 864 u8 port_num, int index, union ib_gid *gid, 865 struct ib_gid_attr *attr) 866 { 867 if (rdma_cap_roce_gid_table(device, port_num)) 868 return ib_get_cached_gid(device, port_num, index, gid, attr); 869 870 if (attr) 871 return -EINVAL; 872 873 return device->query_gid(device, port_num, index, gid); 874 } 875 EXPORT_SYMBOL(ib_query_gid); 876 877 /** 878 * ib_enum_roce_netdev - enumerate all RoCE ports 879 * @ib_dev : IB device we want to query 880 * @filter: Should we call the callback? 881 * @filter_cookie: Cookie passed to filter 882 * @cb: Callback to call for each found RoCE ports 883 * @cookie: Cookie passed back to the callback 884 * 885 * Enumerates all of the physical RoCE ports of ib_dev 886 * which are related to netdevice and calls callback() on each 887 * device for which filter() function returns non zero. 888 */ 889 void ib_enum_roce_netdev(struct ib_device *ib_dev, 890 roce_netdev_filter filter, 891 void *filter_cookie, 892 roce_netdev_callback cb, 893 void *cookie) 894 { 895 u8 port; 896 897 for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev); 898 port++) 899 if (rdma_protocol_roce(ib_dev, port)) { 900 struct net_device *idev = NULL; 901 902 if (ib_dev->get_netdev) 903 idev = ib_dev->get_netdev(ib_dev, port); 904 905 if (idev && 906 idev->reg_state >= NETREG_UNREGISTERED) { 907 dev_put(idev); 908 idev = NULL; 909 } 910 911 if (filter(ib_dev, port, idev, filter_cookie)) 912 cb(ib_dev, port, idev, cookie); 913 914 if (idev) 915 dev_put(idev); 916 } 917 } 918 919 /** 920 * ib_enum_all_roce_netdevs - enumerate all RoCE devices 921 * @filter: Should we call the callback? 922 * @filter_cookie: Cookie passed to filter 923 * @cb: Callback to call for each found RoCE ports 924 * @cookie: Cookie passed back to the callback 925 * 926 * Enumerates all RoCE devices' physical ports which are related 927 * to netdevices and calls callback() on each device for which 928 * filter() function returns non zero. 929 */ 930 void ib_enum_all_roce_netdevs(roce_netdev_filter filter, 931 void *filter_cookie, 932 roce_netdev_callback cb, 933 void *cookie) 934 { 935 struct ib_device *dev; 936 937 down_read(&lists_rwsem); 938 list_for_each_entry(dev, &device_list, core_list) 939 ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie); 940 up_read(&lists_rwsem); 941 } 942 943 /** 944 * ib_enum_all_devs - enumerate all ib_devices 945 * @cb: Callback to call for each found ib_device 946 * 947 * Enumerates all ib_devices and calls callback() on each device. 948 */ 949 int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, 950 struct netlink_callback *cb) 951 { 952 struct ib_device *dev; 953 unsigned int idx = 0; 954 int ret = 0; 955 956 down_read(&lists_rwsem); 957 list_for_each_entry(dev, &device_list, core_list) { 958 ret = nldev_cb(dev, skb, cb, idx); 959 if (ret) 960 break; 961 idx++; 962 } 963 964 up_read(&lists_rwsem); 965 return ret; 966 } 967 968 /** 969 * ib_query_pkey - Get P_Key table entry 970 * @device:Device to query 971 * @port_num:Port number to query 972 * @index:P_Key table index to query 973 * @pkey:Returned P_Key 974 * 975 * ib_query_pkey() fetches the specified P_Key table entry. 976 */ 977 int ib_query_pkey(struct ib_device *device, 978 u8 port_num, u16 index, u16 *pkey) 979 { 980 return device->query_pkey(device, port_num, index, pkey); 981 } 982 EXPORT_SYMBOL(ib_query_pkey); 983 984 /** 985 * ib_modify_device - Change IB device attributes 986 * @device:Device to modify 987 * @device_modify_mask:Mask of attributes to change 988 * @device_modify:New attribute values 989 * 990 * ib_modify_device() changes a device's attributes as specified by 991 * the @device_modify_mask and @device_modify structure. 992 */ 993 int ib_modify_device(struct ib_device *device, 994 int device_modify_mask, 995 struct ib_device_modify *device_modify) 996 { 997 if (!device->modify_device) 998 return -ENOSYS; 999 1000 return device->modify_device(device, device_modify_mask, 1001 device_modify); 1002 } 1003 EXPORT_SYMBOL(ib_modify_device); 1004 1005 /** 1006 * ib_modify_port - Modifies the attributes for the specified port. 1007 * @device: The device to modify. 1008 * @port_num: The number of the port to modify. 1009 * @port_modify_mask: Mask used to specify which attributes of the port 1010 * to change. 1011 * @port_modify: New attribute values for the port. 1012 * 1013 * ib_modify_port() changes a port's attributes as specified by the 1014 * @port_modify_mask and @port_modify structure. 1015 */ 1016 int ib_modify_port(struct ib_device *device, 1017 u8 port_num, int port_modify_mask, 1018 struct ib_port_modify *port_modify) 1019 { 1020 int rc; 1021 1022 if (!rdma_is_port_valid(device, port_num)) 1023 return -EINVAL; 1024 1025 if (device->modify_port) 1026 rc = device->modify_port(device, port_num, port_modify_mask, 1027 port_modify); 1028 else 1029 rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS; 1030 return rc; 1031 } 1032 EXPORT_SYMBOL(ib_modify_port); 1033 1034 /** 1035 * ib_find_gid - Returns the port number and GID table index where 1036 * a specified GID value occurs. 1037 * @device: The device to query. 1038 * @gid: The GID value to search for. 1039 * @gid_type: Type of GID. 1040 * @ndev: The ndev related to the GID to search for. 1041 * @port_num: The port number of the device where the GID value was found. 1042 * @index: The index into the GID table where the GID was found. This 1043 * parameter may be NULL. 1044 */ 1045 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 1046 enum ib_gid_type gid_type, struct net_device *ndev, 1047 u8 *port_num, u16 *index) 1048 { 1049 union ib_gid tmp_gid; 1050 int ret, port, i; 1051 1052 for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) { 1053 if (rdma_cap_roce_gid_table(device, port)) { 1054 if (!ib_find_cached_gid_by_port(device, gid, gid_type, port, 1055 ndev, index)) { 1056 *port_num = port; 1057 return 0; 1058 } 1059 } 1060 1061 if (gid_type != IB_GID_TYPE_IB) 1062 continue; 1063 1064 for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) { 1065 ret = ib_query_gid(device, port, i, &tmp_gid, NULL); 1066 if (ret) 1067 return ret; 1068 if (!memcmp(&tmp_gid, gid, sizeof *gid)) { 1069 *port_num = port; 1070 if (index) 1071 *index = i; 1072 return 0; 1073 } 1074 } 1075 } 1076 1077 return -ENOENT; 1078 } 1079 EXPORT_SYMBOL(ib_find_gid); 1080 1081 /** 1082 * ib_find_pkey - Returns the PKey table index where a specified 1083 * PKey value occurs. 1084 * @device: The device to query. 1085 * @port_num: The port number of the device to search for the PKey. 1086 * @pkey: The PKey value to search for. 1087 * @index: The index into the PKey table where the PKey was found. 1088 */ 1089 int ib_find_pkey(struct ib_device *device, 1090 u8 port_num, u16 pkey, u16 *index) 1091 { 1092 int ret, i; 1093 u16 tmp_pkey; 1094 int partial_ix = -1; 1095 1096 for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) { 1097 ret = ib_query_pkey(device, port_num, i, &tmp_pkey); 1098 if (ret) 1099 return ret; 1100 if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) { 1101 /* if there is full-member pkey take it.*/ 1102 if (tmp_pkey & 0x8000) { 1103 *index = i; 1104 return 0; 1105 } 1106 if (partial_ix < 0) 1107 partial_ix = i; 1108 } 1109 } 1110 1111 /*no full-member, if exists take the limited*/ 1112 if (partial_ix >= 0) { 1113 *index = partial_ix; 1114 return 0; 1115 } 1116 return -ENOENT; 1117 } 1118 EXPORT_SYMBOL(ib_find_pkey); 1119 1120 /** 1121 * ib_get_net_dev_by_params() - Return the appropriate net_dev 1122 * for a received CM request 1123 * @dev: An RDMA device on which the request has been received. 1124 * @port: Port number on the RDMA device. 1125 * @pkey: The Pkey the request came on. 1126 * @gid: A GID that the net_dev uses to communicate. 1127 * @addr: Contains the IP address that the request specified as its 1128 * destination. 1129 */ 1130 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, 1131 u8 port, 1132 u16 pkey, 1133 const union ib_gid *gid, 1134 const struct sockaddr *addr) 1135 { 1136 struct net_device *net_dev = NULL; 1137 struct ib_client_data *context; 1138 1139 if (!rdma_protocol_ib(dev, port)) 1140 return NULL; 1141 1142 down_read(&lists_rwsem); 1143 1144 list_for_each_entry(context, &dev->client_data_list, list) { 1145 struct ib_client *client = context->client; 1146 1147 if (context->going_down) 1148 continue; 1149 1150 if (client->get_net_dev_by_params) { 1151 net_dev = client->get_net_dev_by_params(dev, port, pkey, 1152 gid, addr, 1153 context->data); 1154 if (net_dev) 1155 break; 1156 } 1157 } 1158 1159 up_read(&lists_rwsem); 1160 1161 return net_dev; 1162 } 1163 EXPORT_SYMBOL(ib_get_net_dev_by_params); 1164 1165 static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { 1166 [RDMA_NL_LS_OP_RESOLVE] = { 1167 .doit = ib_nl_handle_resolve_resp, 1168 .flags = RDMA_NL_ADMIN_PERM, 1169 }, 1170 [RDMA_NL_LS_OP_SET_TIMEOUT] = { 1171 .doit = ib_nl_handle_set_timeout, 1172 .flags = RDMA_NL_ADMIN_PERM, 1173 }, 1174 [RDMA_NL_LS_OP_IP_RESOLVE] = { 1175 .doit = ib_nl_handle_ip_res_resp, 1176 .flags = RDMA_NL_ADMIN_PERM, 1177 }, 1178 }; 1179 1180 static int __init ib_core_init(void) 1181 { 1182 int ret; 1183 1184 ib_wq = alloc_workqueue("infiniband", 0, 0); 1185 if (!ib_wq) 1186 return -ENOMEM; 1187 1188 ib_comp_wq = alloc_workqueue("ib-comp-wq", 1189 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 1190 if (!ib_comp_wq) { 1191 ret = -ENOMEM; 1192 goto err; 1193 } 1194 1195 ret = class_register(&ib_class); 1196 if (ret) { 1197 pr_warn("Couldn't create InfiniBand device class\n"); 1198 goto err_comp; 1199 } 1200 1201 ret = rdma_nl_init(); 1202 if (ret) { 1203 pr_warn("Couldn't init IB netlink interface: err %d\n", ret); 1204 goto err_sysfs; 1205 } 1206 1207 ret = addr_init(); 1208 if (ret) { 1209 pr_warn("Could't init IB address resolution\n"); 1210 goto err_ibnl; 1211 } 1212 1213 ret = ib_mad_init(); 1214 if (ret) { 1215 pr_warn("Couldn't init IB MAD\n"); 1216 goto err_addr; 1217 } 1218 1219 ret = ib_sa_init(); 1220 if (ret) { 1221 pr_warn("Couldn't init SA\n"); 1222 goto err_mad; 1223 } 1224 1225 ret = register_lsm_notifier(&ibdev_lsm_nb); 1226 if (ret) { 1227 pr_warn("Couldn't register LSM notifier. ret %d\n", ret); 1228 goto err_sa; 1229 } 1230 1231 nldev_init(); 1232 rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table); 1233 ib_cache_setup(); 1234 1235 return 0; 1236 1237 err_sa: 1238 ib_sa_cleanup(); 1239 err_mad: 1240 ib_mad_cleanup(); 1241 err_addr: 1242 addr_cleanup(); 1243 err_ibnl: 1244 rdma_nl_exit(); 1245 err_sysfs: 1246 class_unregister(&ib_class); 1247 err_comp: 1248 destroy_workqueue(ib_comp_wq); 1249 err: 1250 destroy_workqueue(ib_wq); 1251 return ret; 1252 } 1253 1254 static void __exit ib_core_cleanup(void) 1255 { 1256 ib_cache_cleanup(); 1257 nldev_exit(); 1258 rdma_nl_unregister(RDMA_NL_LS); 1259 unregister_lsm_notifier(&ibdev_lsm_nb); 1260 ib_sa_cleanup(); 1261 ib_mad_cleanup(); 1262 addr_cleanup(); 1263 rdma_nl_exit(); 1264 class_unregister(&ib_class); 1265 destroy_workqueue(ib_comp_wq); 1266 /* Make sure that any pending umem accounting work is done. */ 1267 destroy_workqueue(ib_wq); 1268 } 1269 1270 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4); 1271 1272 subsys_initcall(ib_core_init); 1273 module_exit(ib_core_cleanup); 1274