/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/hashtable.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_counter.h>

#include "core_priv.h"
#include "restrack.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/*
 * Each of the three rwsem locks (devices, clients, client_data) protects the
 * xarray of the same name. Specifically it allows the caller to assert that
 * the MARK will/will not be changing under the lock, and for devices and
 * clients, that the value in the xarray is still a valid pointer. Change of
 * the MARK is linked to the object state, so holding the lock and testing the
 * MARK also asserts that the contained object is in a certain state.
 *
 * This is used to build a two stage register/unregister flow where objects
 * can continue to be in the xarray even though they are still in progress to
 * register/unregister.
 *
 * The xarray itself provides additional locking, and restartable iteration,
 * which is also relied on.
 *
 * Locks should not be nested, with the exception of client_data, which is
 * allowed to nest under the read side of the other two locks.
 *
 * The devices_rwsem also protects the device name list, any change or
 * assignment of device name must also hold the write side to guarantee unique
 * names.
 */

/*
 * devices contains devices that have had their names assigned. The
 * devices may not be registered. Users that care about the registration
 * status need to call ib_device_try_get() on the device to ensure it is
 * registered, and keep it registered, for the required duration.
 *
 */
static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(devices_rwsem);
#define DEVICE_REGISTERED XA_MARK_1

static LIST_HEAD(client_list);
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(clients_rwsem);

/*
 * If client_data is registered then the corresponding client must also still
 * be registered.
 */
#define CLIENT_DATA_REGISTERED XA_MARK_1

/**
 * struct rdma_dev_net - rdma net namespace metadata for a net
 * @net: Pointer to owner net namespace
 * @id: xarray id to identify the net namespace.
 */
struct rdma_dev_net {
	possible_net_t net;
	u32 id;
};

static unsigned int rdma_dev_net_id;

/*
 * A list of net namespaces is maintained in an xarray. This is necessary
 * because we can't get the locking right using the existing net ns list. We
 * would require an init_net callback after the list is updated.
 */
static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
/*
 * rwsem to protect accessing the rdma_nets xarray entries.
 */
static DECLARE_RWSEM(rdma_nets_rwsem);

bool ib_devices_shared_netns = true;
module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
MODULE_PARM_DESC(netns_mode,
		 "Share device among net namespaces; default=1 (shared)");
/**
 * rdma_dev_access_netns() - Return whether an rdma device can be accessed
 *			     from a specified net namespace or not.
 * @dev: Pointer to the rdma device which needs to be checked
 * @net: Pointer to the net namespace for which access is to be checked
 *
 * When the rdma device is in shared mode, the net namespace is ignored.
 * When the rdma device is exclusive to a net namespace, the device's net
 * namespace is checked against the specified one.
 */
bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
{
	return (ib_devices_shared_netns ||
		net_eq(read_pnet(&dev->coredev.rdma_net), net));
}
EXPORT_SYMBOL(rdma_dev_access_netns);
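
/*
 * Usage sketch (illustrative, not part of this file's logic): callers that
 * hand an ib_device to a namespace-aware user are expected to gate access
 * with rdma_dev_access_netns(); the device and net pointers below are
 * assumed to come from the caller's own context.
 *
 *	if (!rdma_dev_access_netns(device, sock_net(skb->sk)))
 *		return -ENODEV;
 */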

/*
 * xarray has this behavior where it won't iterate over NULL values stored in
 * allocated arrays. So we need our own iterator to see all values stored in
 * the array. This does the same thing as xa_for_each except that it also
 * returns NULL valued entries if the array is allocating. Simplified to only
 * work on simple xarrays.
 */
static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
			     xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_find_marked(&xas, ULONG_MAX, filter);
		if (xa_is_zero(entry))
			break;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry) {
		*indexp = xas.xa_index;
		if (xa_is_zero(entry))
			return NULL;
		return entry;
	}
	return XA_ERROR(-ENOENT);
}
#define xan_for_each_marked(xa, index, entry, filter)			\
	for (index = 0, entry = xan_find_marked(xa, &(index), filter);	\
	     !xa_is_err(entry);						\
	     (index)++, entry = xan_find_marked(xa, &(index), filter))
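
/*
 * Usage sketch (illustrative): unlike xa_for_each(), this iterator also
 * visits NULL values, which this file relies on because client data may be
 * stored as NULL. The variables below are assumed to be declared by the
 * caller.
 *
 *	unsigned long index;
 *	void *entry;
 *
 *	xan_for_each_marked(&device->client_data, index, entry,
 *			    CLIENT_DATA_REGISTERED) {
 *		// entry may legitimately be NULL here
 *	}
 */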

/* RCU hash table mapping netdevice pointers to struct ib_port_data */
static DEFINE_SPINLOCK(ndev_hash_lock);
static DECLARE_HASHTABLE(ndev_hash, 5);

static void free_netdevs(struct ib_device *ib_dev);
static void ib_unregister_work(struct work_struct *work);
static void __ib_unregister_device(struct ib_device *device);
static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
			   struct va_format *vaf)
{
	if (ibdev && ibdev->dev.parent)
		dev_printk_emit(level[1] - '0',
				ibdev->dev.parent,
				"%s %s %s: %pV",
				dev_driver_string(ibdev->dev.parent),
				dev_name(ibdev->dev.parent),
				dev_name(&ibdev->dev),
				vaf);
	else if (ibdev)
		printk("%s%s: %pV",
		       level, dev_name(&ibdev->dev), vaf);
	else
		printk("%s(NULL ib_device): %pV", level, vaf);
}

void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__ibdev_printk(level, ibdev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ibdev_printk);

#define define_ibdev_printk_level(func, level)			\
void func(const struct ib_device *ibdev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__ibdev_printk(level, ibdev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
define_ibdev_printk_level(ibdev_err, KERN_ERR);
define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
define_ibdev_printk_level(ibdev_info, KERN_INFO);
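
/*
 * Usage sketch (illustrative): drivers and clients can use the generated
 * helpers instead of raw printk() so that messages are prefixed with the
 * parent and IB device names; the qpn/state arguments below are made up.
 *
 *	ibdev_warn(ibdev, "QP%u: unexpected state %d\n", qpn, state);
 */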

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
				 struct net *net);

/* Pointer to the RCU head at the start of the ib_port_data array */
struct ib_port_data_rcu {
	struct rcu_head rcu_head;
	struct ib_port_data pdata[];
};

static void ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	device->kverbs_provider = true;
	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) &device->ops +
				 mandatory_table[i].offset)) {
			device->kverbs_provider = false;
			break;
		}
	}
}

/*
 * Caller must perform ib_device_put() to return the device reference count
 * when ib_device_get_by_index() returns a valid device pointer.
 */
struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = xa_load(&devices, index);
	if (device) {
		if (!rdma_dev_access_netns(device, net)) {
			device = NULL;
			goto out;
		}

		if (!ib_device_try_get(device))
			device = NULL;
	}
out:
	up_read(&devices_rwsem);
	return device;
}

/**
 * ib_device_put - Release IB device reference
 * @device: device whose reference is to be released
 *
 * ib_device_put() releases the reference to the IB device to allow it to be
 * unregistered and eventually freed.
 */
void ib_device_put(struct ib_device *device)
{
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->unreg_completion);
}
EXPORT_SYMBOL(ib_device_put);

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;
	unsigned long index;

	xa_for_each (&devices, index, device)
		if (!strcmp(name, dev_name(&device->dev)))
			return device;

	return NULL;
}

/**
 * ib_device_get_by_name - Find an IB device by name
 * @name: The name to look for
 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
 *
 * Find and hold an ib_device by its name. The caller must call
 * ib_device_put() on the returned pointer.
 */
struct ib_device *ib_device_get_by_name(const char *name,
					enum rdma_driver_id driver_id)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = __ib_device_get_by_name(name);
	if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
	    device->ops.driver_id != driver_id)
		device = NULL;

	if (device) {
		if (!ib_device_try_get(device))
			device = NULL;
	}
	up_read(&devices_rwsem);
	return device;
}
EXPORT_SYMBOL(ib_device_get_by_name);
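
/*
 * Usage sketch (illustrative): every successful lookup must be balanced by
 * ib_device_put(), otherwise unregistration blocks forever waiting for the
 * reference to drop. "mlx5_0" is just an example device name.
 *
 *	struct ib_device *dev;
 *
 *	dev = ib_device_get_by_name("mlx5_0", RDMA_DRIVER_UNKNOWN);
 *	if (dev) {
 *		// ... use dev ...
 *		ib_device_put(dev);
 *	}
 */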

static int rename_compat_devs(struct ib_device *device)
{
	struct ib_core_device *cdev;
	unsigned long index;
	int ret = 0;

	mutex_lock(&device->compat_devs_mutex);
	xa_for_each (&device->compat_devs, index, cdev) {
		ret = device_rename(&cdev->dev, dev_name(&device->dev));
		if (ret) {
			dev_warn(&cdev->dev,
				 "Fail to rename compatdev to new name %s\n",
				 dev_name(&device->dev));
			break;
		}
	}
	mutex_unlock(&device->compat_devs_mutex);
	return ret;
}

int ib_device_rename(struct ib_device *ibdev, const char *name)
{
	unsigned long index;
	void *client_data;
	int ret;

	down_write(&devices_rwsem);
	if (!strcmp(name, dev_name(&ibdev->dev))) {
		up_write(&devices_rwsem);
		return 0;
	}

	if (__ib_device_get_by_name(name)) {
		up_write(&devices_rwsem);
		return -EEXIST;
	}

	ret = device_rename(&ibdev->dev, name);
	if (ret) {
		up_write(&devices_rwsem);
		return ret;
	}

	strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
	ret = rename_compat_devs(ibdev);

	downgrade_write(&devices_rwsem);
	down_read(&ibdev->client_data_rwsem);
	xan_for_each_marked(&ibdev->client_data, index, client_data,
			    CLIENT_DATA_REGISTERED) {
		struct ib_client *client = xa_load(&clients, index);

		if (!client || !client->rename)
			continue;

		client->rename(ibdev, client_data);
	}
	up_read(&ibdev->client_data_rwsem);
	up_read(&devices_rwsem);
	return 0;
}

static int alloc_name(struct ib_device *ibdev, const char *name)
{
	struct ib_device *device;
	unsigned long index;
	struct ida inuse;
	int rc;
	int i;

	lockdep_assert_held_exclusive(&devices_rwsem);
	ida_init(&inuse);
	xa_for_each (&devices, index, device) {
		char buf[IB_DEVICE_NAME_MAX];

		if (sscanf(dev_name(&device->dev), name, &i) != 1)
			continue;
		if (i < 0 || i >= INT_MAX)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (strcmp(buf, dev_name(&device->dev)) != 0)
			continue;

		rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
		if (rc < 0)
			goto out;
	}

	rc = ida_alloc(&inuse, GFP_KERNEL);
	if (rc < 0)
		goto out;

	rc = dev_set_name(&ibdev->dev, name, rc);
out:
	ida_destroy(&inuse);
	return rc;
}

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	free_netdevs(dev);
	WARN_ON(refcount_read(&dev->refcount));
	if (dev->port_data) {
		ib_cache_release_one(dev);
		ib_security_release_port_pkey_list(dev);
		rdma_counter_release(dev);
		kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
				       pdata[0]),
			  rcu_head);
	}

	xa_destroy(&dev->compat_devs);
	xa_destroy(&dev->client_data);
	kfree_rcu(dev, rcu_head);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "NAME=%s", dev_name(device)))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static const void *net_namespace(struct device *d)
{
	struct ib_core_device *coredev =
			container_of(d, struct ib_core_device, dev);

	return read_pnet(&coredev->rdma_net);
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent = ib_device_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

static void rdma_init_coredev(struct ib_core_device *coredev,
			      struct ib_device *dev, struct net *net)
{
	/* This BUILD_BUG_ON is intended to catch layout change
	 * of union of ib_core_device and device.
	 * dev must be the first element as ib_core and provider
	 * drivers use it. Adding anything in ib_core_device before
	 * device will break this assumption.
	 */
	BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
		     offsetof(struct ib_device, dev));

	coredev->dev.class = &ib_class;
	coredev->dev.groups = dev->groups;
	device_initialize(&coredev->dev);
	coredev->owner = dev;
	INIT_LIST_HEAD(&coredev->port_list);
	write_pnet(&coredev->rdma_net, net);
}

/**
 * _ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device. @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *_ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	if (rdma_restrack_init(device)) {
		kfree(device);
		return NULL;
	}

	device->groups[0] = &ib_dev_attr_group;
	rdma_init_coredev(&device->coredev, device, &init_net);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	mutex_init(&device->unregistration_lock);
	/*
	 * client_data needs to be an allocating xarray because we don't want
	 * our mark to be destroyed if the user stores NULL in the client data.
	 */
	xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
	init_rwsem(&device->client_data_rwsem);
	xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
	mutex_init(&device->compat_devs_mutex);
	init_completion(&device->unreg_completion);
	INIT_WORK(&device->unregistration_work, ib_unregister_work);

	return device;
}
EXPORT_SYMBOL(_ib_alloc_device);

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	if (device->ops.dealloc_driver)
		device->ops.dealloc_driver(device);

	/*
	 * ib_unregister_driver() requires all devices to remain in the xarray
	 * while their ops are callable. The last op we call is dealloc_driver
	 * above. This is needed to create a fence on op callbacks prior to
	 * allowing the driver module to unload.
	 */
	down_write(&devices_rwsem);
	if (xa_load(&devices, device->index) == device)
		xa_erase(&devices, device->index);
	up_write(&devices_rwsem);

	/* Expedite releasing netdev references */
	free_netdevs(device);

	WARN_ON(!xa_empty(&device->compat_devs));
	WARN_ON(!xa_empty(&device->client_data));
	WARN_ON(refcount_read(&device->refcount));
	rdma_restrack_clean(device);
	/* Balances with device_initialize */
	put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
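
/*
 * Usage sketch (illustrative): providers normally go through the
 * ib_alloc_device() macro from <rdma/ib_verbs.h> rather than calling
 * _ib_alloc_device() directly; "struct my_dev" and its "ibdev" member are
 * hypothetical driver-private names.
 *
 *	struct my_dev *mdev = ib_alloc_device(my_dev, ibdev);
 *
 *	if (!mdev)
 *		return -ENOMEM;
 *	// ...
 *	ib_dealloc_device(&mdev->ibdev);	// on error unwind
 */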

/*
 * add_client_context() and remove_client_context() must be safe against
 * parallel calls on the same device - registration/unregistration of both the
 * device and client can be occurring in parallel.
 *
 * The routines need to be a fence, any caller must not return until the add
 * or remove is fully completed.
 */
static int add_client_context(struct ib_device *device,
			      struct ib_client *client)
{
	int ret = 0;

	if (!device->kverbs_provider && !client->no_kverbs_req)
		return 0;

	down_write(&device->client_data_rwsem);
	/*
	 * Another caller to add_client_context got here first and has already
	 * completely initialized context.
	 */
	if (xa_get_mark(&device->client_data, client->client_id,
			CLIENT_DATA_REGISTERED))
		goto out;

	ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
			      GFP_KERNEL));
	if (ret)
		goto out;
	downgrade_write(&device->client_data_rwsem);
	if (client->add)
		client->add(device);

	/* Readers shall not see a client until add has been completed */
	xa_set_mark(&device->client_data, client->client_id,
		    CLIENT_DATA_REGISTERED);
	up_read(&device->client_data_rwsem);
	return 0;

out:
	up_write(&device->client_data_rwsem);
	return ret;
}

static void remove_client_context(struct ib_device *device,
				  unsigned int client_id)
{
	struct ib_client *client;
	void *client_data;

	down_write(&device->client_data_rwsem);
	if (!xa_get_mark(&device->client_data, client_id,
			 CLIENT_DATA_REGISTERED)) {
		up_write(&device->client_data_rwsem);
		return;
	}
	client_data = xa_load(&device->client_data, client_id);
	xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
	client = xa_load(&clients, client_id);
	downgrade_write(&device->client_data_rwsem);

	/*
	 * Notice we cannot be holding any exclusive locks when calling the
	 * remove callback as the remove callback can recurse back into any
	 * public functions in this module and thus try for any locks those
	 * functions take.
	 *
	 * For this reason clients and drivers should not call the
	 * unregistration functions while holding any locks.
	 *
	 * It is tempting to drop the client_data_rwsem too, but this is
	 * required to ensure that unregister_client does not return until all
	 * clients are completely unregistered, which is required to avoid
	 * module unloading races.
	 */
	if (client->remove)
		client->remove(device, client_data);

	xa_erase(&device->client_data, client_id);
	up_read(&device->client_data_rwsem);
}

static int alloc_port_data(struct ib_device *device)
{
	struct ib_port_data_rcu *pdata_rcu;
	unsigned int port;

	if (device->port_data)
		return 0;

	/* This can only be called once the physical port range is defined */
	if (WARN_ON(!device->phys_port_cnt))
		return -EINVAL;

	/*
	 * device->port_data is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_data is declared as a 1-based array with potential
	 * empty slots at the beginning.
	 */
	pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
					rdma_end_port(device) + 1),
			    GFP_KERNEL);
	if (!pdata_rcu)
		return -ENOMEM;
	/*
	 * The rcu_head is put in front of the port data array and the stored
	 * pointer is adjusted since we never need to see that member until
	 * kfree_rcu.
	 */
	device->port_data = pdata_rcu->pdata;

	rdma_for_each_port (device, port) {
		struct ib_port_data *pdata = &device->port_data[port];

		pdata->ib_dev = device;
		spin_lock_init(&pdata->pkey_list_lock);
		INIT_LIST_HEAD(&pdata->pkey_list);
		spin_lock_init(&pdata->netdev_lock);
		INIT_HLIST_NODE(&pdata->ndev_hash_link);
	}
	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
			rdma_max_mad_size(dev, port) != 0);
}

static int setup_port_data(struct ib_device *device)
{
	unsigned int port;
	int ret;

	ret = alloc_port_data(device);
	if (ret)
		return ret;

	rdma_for_each_port (device, port) {
		struct ib_port_data *pdata = &device->port_data[port];

		ret = device->ops.get_port_immutable(device, port,
						     &pdata->immutable);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}
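
/*
 * Usage sketch (illustrative): port numbers are 1-based, so per-port state
 * is normally walked with rdma_for_each_port() and indexed directly:
 *
 *	unsigned int port;
 *
 *	rdma_for_each_port (device, port) {
 *		struct ib_port_data *pdata = &device->port_data[port];
 *		// ... use pdata ...
 *	}
 */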

void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->ops.get_dev_fw_str)
		dev->ops.get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		unsigned int i;

		rdma_for_each_port (dev, i) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev,
							      i,
							      &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&devices_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);
	ib_mad_agent_security_change();

	return NOTIFY_OK;
}

static void compatdev_release(struct device *dev)
{
	struct ib_core_device *cdev =
		container_of(dev, struct ib_core_device, dev);

	kfree(cdev);
}

static int add_one_compat_dev(struct ib_device *device,
			      struct rdma_dev_net *rnet)
{
	struct ib_core_device *cdev;
	int ret;

	lockdep_assert_held(&rdma_nets_rwsem);
	if (!ib_devices_shared_netns)
		return 0;

	/*
	 * Create and add compat device in all namespaces other than where it
	 * is currently bound to.
	 */
	if (net_eq(read_pnet(&rnet->net),
		   read_pnet(&device->coredev.rdma_net)))
		return 0;

	/*
	 * The first of init_net() or ib_register_device() to take the
	 * compat_devs_mutex wins and gets to add the device. Others will wait
	 * for completion here.
	 */
	mutex_lock(&device->compat_devs_mutex);
	cdev = xa_load(&device->compat_devs, rnet->id);
	if (cdev) {
		ret = 0;
		goto done;
	}
	ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
	if (ret)
		goto done;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev) {
		ret = -ENOMEM;
		goto cdev_err;
	}

	cdev->dev.parent = device->dev.parent;
	rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
	cdev->dev.release = compatdev_release;
	dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));

	ret = device_add(&cdev->dev);
	if (ret)
		goto add_err;
	ret = ib_setup_port_attrs(cdev);
	if (ret)
		goto port_err;

	ret = xa_err(xa_store(&device->compat_devs, rnet->id,
			      cdev, GFP_KERNEL));
	if (ret)
		goto insert_err;

	mutex_unlock(&device->compat_devs_mutex);
	return 0;

insert_err:
	ib_free_port_attrs(cdev);
port_err:
	device_del(&cdev->dev);
add_err:
	put_device(&cdev->dev);
cdev_err:
	xa_release(&device->compat_devs, rnet->id);
done:
	mutex_unlock(&device->compat_devs_mutex);
	return ret;
}

static void remove_one_compat_dev(struct ib_device *device, u32 id)
{
	struct ib_core_device *cdev;

	mutex_lock(&device->compat_devs_mutex);
	cdev = xa_erase(&device->compat_devs, id);
	mutex_unlock(&device->compat_devs_mutex);
	if (cdev) {
		ib_free_port_attrs(cdev);
		device_del(&cdev->dev);
		put_device(&cdev->dev);
	}
}

static void remove_compat_devs(struct ib_device *device)
{
	struct ib_core_device *cdev;
	unsigned long index;

	xa_for_each (&device->compat_devs, index, cdev)
		remove_one_compat_dev(device, index);
}

static int add_compat_devs(struct ib_device *device)
{
	struct rdma_dev_net *rnet;
	unsigned long index;
	int ret = 0;

	lockdep_assert_held(&devices_rwsem);

	down_read(&rdma_nets_rwsem);
	xa_for_each (&rdma_nets, index, rnet) {
		ret = add_one_compat_dev(device, rnet);
		if (ret)
			break;
	}
	up_read(&rdma_nets_rwsem);
	return ret;
}

static void remove_all_compat_devs(void)
{
	struct ib_compat_device *cdev;
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, dev) {
		unsigned long c_index = 0;

		/* Hold nets_rwsem so that any other thread modifying this
		 * system param can sync with this thread.
		 */
		down_read(&rdma_nets_rwsem);
		xa_for_each (&dev->compat_devs, c_index, cdev)
			remove_one_compat_dev(dev, c_index);
		up_read(&rdma_nets_rwsem);
	}
	up_read(&devices_rwsem);
}

static int add_all_compat_devs(void)
{
	struct rdma_dev_net *rnet;
	struct ib_device *dev;
	unsigned long index;
	int ret = 0;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		unsigned long net_index = 0;

		/* Hold nets_rwsem so that any other thread modifying this
		 * system param can sync with this thread.
		 */
		down_read(&rdma_nets_rwsem);
		xa_for_each (&rdma_nets, net_index, rnet) {
			ret = add_one_compat_dev(dev, rnet);
			if (ret)
				break;
		}
		up_read(&rdma_nets_rwsem);
	}
	up_read(&devices_rwsem);
	if (ret)
		remove_all_compat_devs();
	return ret;
}

int rdma_compatdev_set(u8 enable)
{
	struct rdma_dev_net *rnet;
	unsigned long index;
	int ret = 0;

	down_write(&rdma_nets_rwsem);
	if (ib_devices_shared_netns == enable) {
		up_write(&rdma_nets_rwsem);
		return 0;
	}

	/* enable/disable of compat devices is not supported
	 * when more than default init_net exists.
	 */
	xa_for_each (&rdma_nets, index, rnet) {
		ret++;
		break;
	}
	if (!ret)
		ib_devices_shared_netns = enable;
	up_write(&rdma_nets_rwsem);
	if (ret)
		return -EBUSY;

	if (enable)
		ret = add_all_compat_devs();
	else
		remove_all_compat_devs();
	return ret;
}

static void rdma_dev_exit_net(struct net *net)
{
	struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
	struct ib_device *dev;
	unsigned long index;
	int ret;

	down_write(&rdma_nets_rwsem);
	/*
	 * Prevent the ID from being re-used and hide the id from xa_for_each.
	 */
	ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
	WARN_ON(ret);
	up_write(&rdma_nets_rwsem);

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, dev) {
		get_device(&dev->dev);
		/*
		 * Release the devices_rwsem so that the potentially blocking
		 * device_del() doesn't hold the devices_rwsem for too long.
		 */
		up_read(&devices_rwsem);

		remove_one_compat_dev(dev, rnet->id);

		/*
		 * If the real device is in the NS then move it back to init.
		 */
		rdma_dev_change_netns(dev, net, &init_net);

		put_device(&dev->dev);
		down_read(&devices_rwsem);
	}
	up_read(&devices_rwsem);

	xa_erase(&rdma_nets, rnet->id);
}

static __net_init int rdma_dev_init_net(struct net *net)
{
	struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
	unsigned long index;
	struct ib_device *dev;
	int ret;

	/* No need to create any compat devices in default init_net. */
	if (net_eq(net, &init_net))
		return 0;

	write_pnet(&rnet->net, net);

	ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		/* Hold nets_rwsem so that netlink command cannot change
		 * system configuration for device sharing mode.
		 */
		down_read(&rdma_nets_rwsem);
		ret = add_one_compat_dev(dev, rnet);
		up_read(&rdma_nets_rwsem);
		if (ret)
			break;
	}
	up_read(&devices_rwsem);

	if (ret)
		rdma_dev_exit_net(net);

	return ret;
}

/*
 * Assign the unique string device name and the unique device index. This is
 * undone by ib_dealloc_device.
 */
static int assign_name(struct ib_device *device, const char *name)
{
	static u32 last_id;
	int ret;

	down_write(&devices_rwsem);
	/* Assign a unique name to the device */
	if (strchr(name, '%'))
		ret = alloc_name(device, name);
	else
		ret = dev_set_name(&device->dev, name);
	if (ret)
		goto out;

	if (__ib_device_get_by_name(dev_name(&device->dev))) {
		ret = -ENFILE;
		goto out;
	}
	strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);

	ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
			      &last_id, GFP_KERNEL);
	if (ret > 0)
		ret = 0;

out:
	up_write(&devices_rwsem);
	return ret;
}

static void setup_dma_device(struct ib_device *device)
{
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask) {
			if (parent)
				device->dev.dma_mask = parent->dma_mask;
			else
				WARN_ON_ONCE(true);
		}
		if (!device->dev.coherent_dma_mask) {
			if (parent)
				device->dev.coherent_dma_mask =
					parent->coherent_dma_mask;
			else
				WARN_ON_ONCE(true);
		}
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		WARN_ON_ONCE(!parent);
		device->dma_device = parent;
	}
	/* Setup default max segment size for all IB devices */
	dma_set_max_seg_size(device->dma_device, SZ_2G);

}

/*
 * setup_device() allocates memory and sets up data that requires calling the
 * device ops, this is the only reason these actions are not done during
 * ib_alloc_device. It is undone by ib_dealloc_device().
 */
static int setup_device(struct ib_device *device)
{
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	int ret;

	setup_dma_device(device);
	ib_device_check_mandatory(device);

	ret = setup_port_data(device);
	if (ret) {
		dev_warn(&device->dev, "Couldn't create per-port data\n");
		return ret;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->ops.query_device(device, &device->attrs, &uhw);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't query the device attributes\n");
		return ret;
	}

	return 0;
}

static void disable_device(struct ib_device *device)
{
	struct ib_client *client;

	WARN_ON(!refcount_read(&device->refcount));

	down_write(&devices_rwsem);
	xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
	up_write(&devices_rwsem);

	down_read(&clients_rwsem);
	list_for_each_entry_reverse(client, &client_list, list)
		remove_client_context(device, client->client_id);
	up_read(&clients_rwsem);

	/* Pairs with refcount_set in enable_device */
	ib_device_put(device);
	wait_for_completion(&device->unreg_completion);

	/*
	 * compat devices must be removed after device refcount drops to zero.
	 * Otherwise init_net() may add more compatdevs after removing compat
	 * devices and before device is disabled.
	 */
	remove_compat_devs(device);
}

/*
 * An enabled device is visible to all clients and to all the public facing
 * APIs that return a device pointer. This always returns with a new get, even
 * if it fails.
 */
static int enable_device_and_get(struct ib_device *device)
{
	struct ib_client *client;
	unsigned long index;
	int ret = 0;

	/*
	 * One ref belongs to the xa and the other belongs to this
	 * thread. This is needed to guard against parallel unregistration.
	 */
	refcount_set(&device->refcount, 2);
	down_write(&devices_rwsem);
	xa_set_mark(&devices, device->index, DEVICE_REGISTERED);

	/*
	 * By using downgrade_write() we ensure that no other thread can clear
	 * DEVICE_REGISTERED while we are completing the client setup.
	 */
	downgrade_write(&devices_rwsem);

	if (device->ops.enable_driver) {
		ret = device->ops.enable_driver(device);
		if (ret)
			goto out;
	}

	down_read(&clients_rwsem);
	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
		ret = add_client_context(device, client);
		if (ret)
			break;
	}
	up_read(&clients_rwsem);
	if (!ret)
		ret = add_compat_devs(device);
out:
	up_read(&devices_rwsem);
	return ret;
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 *
 * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
 * asynchronously then the device pointer may become freed as soon as this
 * function returns.
 */
int ib_register_device(struct ib_device *device, const char *name)
{
	int ret;

	ret = assign_name(device, name);
	if (ret)
		return ret;

	ret = setup_device(device);
	if (ret)
		return ret;

	ret = ib_cache_setup_one(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't set up InfiniBand P_Key/GID cache\n");
		return ret;
	}

	ib_device_register_rdmacg(device);

	rdma_counter_init(device);

	/*
	 * Ensure that the ADD uevent is not fired because it
	 * is too early and the device is not initialized yet.
	 */
	dev_set_uevent_suppress(&device->dev, true);
	ret = device_add(&device->dev);
	if (ret)
		goto cg_cleanup;

	ret = ib_device_register_sysfs(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't register device with driver model\n");
		goto dev_cleanup;
	}

	ret = enable_device_and_get(device);
	dev_set_uevent_suppress(&device->dev, false);
	/* Mark for userspace that device is ready */
	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
	if (ret) {
		void (*dealloc_fn)(struct ib_device *);

		/*
		 * If we hit this error flow then we don't want to
		 * automatically dealloc the device since the caller is
		 * expected to call ib_dealloc_device() after
		 * ib_register_device() fails. This is tricky due to the
		 * possibility for a parallel unregistration along with this
		 * error flow. Since we have a refcount here we know any
		 * parallel flow is stopped in disable_device and will see the
		 * NULL pointers, causing the responsibility to
		 * ib_dealloc_device() to revert back to this thread.
		 */
		dealloc_fn = device->ops.dealloc_driver;
		device->ops.dealloc_driver = NULL;
		ib_device_put(device);
		__ib_unregister_device(device);
		device->ops.dealloc_driver = dealloc_fn;
		return ret;
	}
	ib_device_put(device);

	return 0;

dev_cleanup:
	device_del(&device->dev);
cg_cleanup:
	dev_set_uevent_suppress(&device->dev, false);
	ib_device_unregister_rdmacg(device);
	ib_cache_cleanup_one(device);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
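
/*
 * Usage sketch (illustrative): a typical provider probe path pairs the
 * allocation and registration calls as below; a "%d" in the name lets
 * alloc_name() pick a free index. "mdev" and "my_hca%d" are hypothetical.
 *
 *	ret = ib_register_device(&mdev->ibdev, "my_hca%d");
 *	if (ret) {
 *		ib_dealloc_device(&mdev->ibdev);
 *		return ret;
 *	}
 */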

/* Callers must hold a get on the device. */
static void __ib_unregister_device(struct ib_device *ib_dev)
{
	/*
	 * We have a registration lock so that all the calls to unregister are
	 * fully fenced, once any unregister returns the device is truly
	 * unregistered even if multiple callers are unregistering it at the
	 * same time. This also interacts with the registration flow and
	 * provides sane semantics if register and unregister are racing.
	 */
	mutex_lock(&ib_dev->unregistration_lock);
	if (!refcount_read(&ib_dev->refcount))
		goto out;

	disable_device(ib_dev);

	/* Expedite removing unregistered pointers from the hash table */
	free_netdevs(ib_dev);

	ib_device_unregister_sysfs(ib_dev);
	device_del(&ib_dev->dev);
	ib_device_unregister_rdmacg(ib_dev);
	ib_cache_cleanup_one(ib_dev);

	/*
	 * Drivers using the new flow may not call ib_dealloc_device except
	 * in error unwind prior to registration success.
	 */
	if (ib_dev->ops.dealloc_driver) {
		WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
		ib_dealloc_device(ib_dev);
	}
out:
	mutex_unlock(&ib_dev->unregistration_lock);
}

/**
 * ib_unregister_device - Unregister an IB device
 * @ib_dev: The device to unregister
 *
 * Unregister an IB device. All clients will receive a remove callback.
 *
 * Callers should call this routine only once, and protect against races with
 * registration. Typically it should only be called as part of a remove
 * callback in an implementation of driver core's struct device_driver and
 * related.
 *
 * If ops.dealloc_driver is used then ib_dev will be freed upon return from
 * this function.
 */
void ib_unregister_device(struct ib_device *ib_dev)
{
	get_device(&ib_dev->dev);
	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_unregister_device_and_put - Unregister a device while holding a 'get'
 * @ib_dev: The device to unregister
 *
 * This is the same as ib_unregister_device(), except it includes an internal
 * ib_device_put() that should match a 'get' obtained by the caller.
 *
 * It is safe to call this routine concurrently from multiple threads while
 * holding the 'get'. When the function returns the device is fully
 * unregistered.
 *
 * Drivers using this flow MUST use the driver_unregister callback to clean up
 * their resources associated with the device and dealloc it.
 */
void ib_unregister_device_and_put(struct ib_device *ib_dev)
{
	WARN_ON(!ib_dev->ops.dealloc_driver);
	get_device(&ib_dev->dev);
	ib_device_put(ib_dev);
	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_and_put);

/**
 * ib_unregister_driver - Unregister all IB devices for a driver
 * @driver_id: The driver to unregister
 *
 * This implements a fence for device unregistration. It only returns once all
 * devices associated with the driver_id have fully completed their
 * unregistration and returned from ib_unregister_device*().
 *
 * If devices are not yet unregistered it goes ahead and starts unregistering
 * them.
 *
 * This does not block creation of new devices with the given driver_id, that
 * is the responsibility of the caller.
 */
void ib_unregister_driver(enum rdma_driver_id driver_id)
{
	struct ib_device *ib_dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, ib_dev) {
		if (ib_dev->ops.driver_id != driver_id)
			continue;

		get_device(&ib_dev->dev);
		up_read(&devices_rwsem);

		WARN_ON(!ib_dev->ops.dealloc_driver);
		__ib_unregister_device(ib_dev);

		put_device(&ib_dev->dev);
		down_read(&devices_rwsem);
	}
	up_read(&devices_rwsem);
}
EXPORT_SYMBOL(ib_unregister_driver);

static void ib_unregister_work(struct work_struct *work)
{
	struct ib_device *ib_dev =
		container_of(work, struct ib_device, unregistration_work);

	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}

/**
 * ib_unregister_device_queued - Unregister a device using a work queue
 * @ib_dev: The device to unregister
 *
 * This schedules an asynchronous unregistration using a WQ for the device. A
 * driver should use this to avoid holding locks while doing unregistration,
 * such as holding the RTNL lock.
 *
 * Drivers using this API must use ib_unregister_driver before module unload
 * to ensure that all scheduled unregistrations have completed.
 */
void ib_unregister_device_queued(struct ib_device *ib_dev)
{
	WARN_ON(!refcount_read(&ib_dev->refcount));
	WARN_ON(!ib_dev->ops.dealloc_driver);
	get_device(&ib_dev->dev);
	if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work))
		put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_queued);
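
/*
 * Usage sketch (illustrative): a driver that queues unregistrations must
 * still fence them before its module text goes away, typically from its
 * module exit path. my_driver_exit() is a hypothetical name and a real
 * driver passes its own rdma_driver_id rather than RDMA_DRIVER_UNKNOWN.
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		ib_unregister_driver(RDMA_DRIVER_UNKNOWN);
 *	}
 */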

/*
 * The caller must pass in a device that has the kref held and the refcount
 * released. If the device is in cur_net and still registered then it is moved
 * into net.
 */
static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
				 struct net *net)
{
	int ret2 = -EINVAL;
	int ret;

	mutex_lock(&device->unregistration_lock);

	/*
	 * If a device is not under ib_device_get() or if the
	 * unregistration_lock is not held, the namespace can be changed, or
	 * it can be unregistered. Check again under the lock.
	 */
	if (refcount_read(&device->refcount) == 0 ||
	    !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
		ret = -ENODEV;
		goto out;
	}

	kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
	disable_device(device);

	/*
	 * At this point no one can be using the device, so it is safe to
	 * change the namespace.
	 */
	write_pnet(&device->coredev.rdma_net, net);

	down_read(&devices_rwsem);
	/*
	 * Currently rdma devices are system wide unique. So the device name
	 * is guaranteed free in the new namespace. Publish the new namespace
	 * at the sysfs level.
	 */
	ret = device_rename(&device->dev, dev_name(&device->dev));
	up_read(&devices_rwsem);
	if (ret) {
		dev_warn(&device->dev,
			 "%s: Couldn't rename device after namespace change\n",
			 __func__);
		/* Try and put things back and re-enable the device */
		write_pnet(&device->coredev.rdma_net, cur_net);
	}

	ret2 = enable_device_and_get(device);
	if (ret2) {
		/*
		 * This shouldn't really happen, but if it does, let the user
		 * retry at later point. So don't disable the device.
		 */
		dev_warn(&device->dev,
			 "%s: Couldn't re-enable device after namespace change\n",
			 __func__);
	}
	kobject_uevent(&device->dev.kobj, KOBJ_ADD);

	ib_device_put(device);
out:
	mutex_unlock(&device->unregistration_lock);
	if (ret)
		return ret;
	return ret2;
}

int ib_device_set_netns_put(struct sk_buff *skb,
			    struct ib_device *dev, u32 ns_fd)
{
	struct net *net;
	int ret;

	net = get_net_ns_by_fd(ns_fd);
	if (IS_ERR(net)) {
		ret = PTR_ERR(net);
		goto net_err;
	}

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
		ret = -EPERM;
		goto ns_err;
	}

	/*
	 * Currently supported only for those providers which support
	 * disassociation and don't do port specific sysfs init. Once a
	 * port_cleanup infrastructure is implemented, this limitation will be
	 * removed.
	 */
	if (!dev->ops.disassociate_ucontext || dev->ops.init_port ||
	    ib_devices_shared_netns) {
		ret = -EOPNOTSUPP;
		goto ns_err;
	}

	get_device(&dev->dev);
	ib_device_put(dev);
	ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
	put_device(&dev->dev);

	put_net(net);
	return ret;

ns_err:
	put_net(net);
net_err:
	ib_device_put(dev);
	return ret;
}

static struct pernet_operations rdma_dev_net_ops = {
	.init = rdma_dev_init_net,
	.exit = rdma_dev_exit_net,
	.id = &rdma_dev_net_id,
	.size = sizeof(struct rdma_dev_net),
};

static int assign_client_id(struct ib_client *client)
{
	int ret;

	down_write(&clients_rwsem);
	/*
	 * The add/remove callbacks must be called in FIFO/LIFO order. To
	 * achieve this we assign client_ids so they are sorted in
	 * registration order, and retain a linked list we can reverse iterate
	 * to get the LIFO order. The extra linked list can go away if xarray
	 * learns to reverse iterate.
	 */
	if (list_empty(&client_list)) {
		client->client_id = 0;
	} else {
		struct ib_client *last;

		last = list_last_entry(&client_list, struct ib_client, list);
		client->client_id = last->client_id + 1;
	}
	ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
	if (ret)
		goto out;

	xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
	list_add_tail(&client->list, &client_list);

out:
	up_write(&clients_rwsem);
	return ret;
}

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal. When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered). In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;
	unsigned long index;
	int ret;

	ret = assign_client_id(client);
	if (ret)
		return ret;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
		ret = add_client_context(device, client);
		if (ret) {
			up_read(&devices_rwsem);
			ib_unregister_client(client);
			return ret;
		}
	}
	up_read(&devices_rwsem);
	return 0;
}
EXPORT_SYMBOL(ib_register_client);

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration. When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 *
 * This is a full fence, once it returns no client callbacks will be called,
 * or are running in another thread.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_device *device;
	unsigned long index;

	down_write(&clients_rwsem);
	xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
	up_write(&clients_rwsem);
	/*
	 * Every device still known must be serialized to make sure we are
	 * done with the client callbacks before we return.
	 */
	down_read(&devices_rwsem);
	xa_for_each (&devices, index, device)
		remove_client_context(device, client->client_id);
	up_read(&devices_rwsem);

	down_write(&clients_rwsem);
	list_del(&client->list);
	xa_erase(&clients, client->client_id);
	up_write(&clients_rwsem);
}
EXPORT_SYMBOL(ib_unregister_client);
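
/*
 * Usage sketch (illustrative): a minimal client registers add/remove
 * callbacks; my_client, my_add and my_remove are hypothetical names.
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add,
 *		.remove = my_remove,
 *	};
 *
 *	ret = ib_register_client(&my_client);
 *	// ...
 *	ib_unregister_client(&my_client);
 */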

static int __ib_get_global_client_nl_info(const char *client_name,
					  struct ib_client_nl_info *res)
{
	struct ib_client *client;
	unsigned long index;
	int ret = -ENOENT;

	down_read(&clients_rwsem);
	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
		if (strcmp(client->name, client_name) != 0)
			continue;
		if (!client->get_global_nl_info) {
			ret = -EOPNOTSUPP;
			break;
		}
		ret = client->get_global_nl_info(res);
		if (WARN_ON(ret == -ENOENT))
			ret = -EINVAL;
		if (!ret && res->cdev)
			get_device(res->cdev);
		break;
	}
	up_read(&clients_rwsem);
	return ret;
}

static int __ib_get_client_nl_info(struct ib_device *ibdev,
				   const char *client_name,
				   struct ib_client_nl_info *res)
{
	unsigned long index;
	void *client_data;
	int ret = -ENOENT;

	down_read(&ibdev->client_data_rwsem);
	xan_for_each_marked (&ibdev->client_data, index, client_data,
			     CLIENT_DATA_REGISTERED) {
		struct ib_client *client = xa_load(&clients, index);

		if (!client || strcmp(client->name, client_name) != 0)
			continue;
		if (!client->get_nl_info) {
			ret = -EOPNOTSUPP;
			break;
		}
		ret = client->get_nl_info(ibdev, client_data, res);
		if (WARN_ON(ret == -ENOENT))
			ret = -EINVAL;

		/*
		 * The cdev is guaranteed valid as long as we are inside the
		 * client_data_rwsem as remove_one can't be called. Keep it
		 * valid for the caller.
		 */
		if (!ret && res->cdev)
			get_device(res->cdev);
		break;
	}
	up_read(&ibdev->client_data_rwsem);

	return ret;
}

/**
 * ib_get_client_nl_info - Fetch the nl_info from a client
 * @ibdev: IB device
 * @client_name: Name of the client
 * @res: Result of the query
 */
int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
			  struct ib_client_nl_info *res)
{
	int ret;

	if (ibdev)
		ret = __ib_get_client_nl_info(ibdev, client_name, res);
	else
		ret = __ib_get_global_client_nl_info(client_name, res);
#ifdef CONFIG_MODULES
	if (ret == -ENOENT) {
		request_module("rdma-client-%s", client_name);
		if (ibdev)
			ret = __ib_get_client_nl_info(ibdev, client_name, res);
		else
			ret = __ib_get_global_client_nl_info(client_name, res);
	}
#endif
	if (ret) {
		if (ret == -ENOENT)
			return -EOPNOTSUPP;
		return ret;
	}

	if (WARN_ON(!res->cdev))
		return -EINVAL;
	return 0;
}

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context data that can be retrieved with
 * ib_get_client_data(). This can only be called while the client is
 * registered to the device, once the ib_client remove() callback returns this
 * cannot be called.
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	void *rc;

	if (WARN_ON(IS_ERR(data)))
		data = NULL;

	rc = xa_store(&device->client_data, client->client_id, data,
		      GFP_KERNEL);
	WARN_ON(xa_is_err(rc));
}
EXPORT_SYMBOL(ib_set_client_data);
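
/*
 * Usage sketch (illustrative): a client typically stores its per-device
 * state from its add callback and reads it back later with
 * ib_get_client_data(); "struct my_state" is a hypothetical client-private
 * structure.
 *
 *	static void my_add(struct ib_device *device)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		ib_set_client_data(device, &my_client, st);
 *	}
 *
 *	// later: struct my_state *st = ib_get_client_data(device, &my_client);
 */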

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification). This
 * callback may occur in interrupt context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
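
/*
 * Usage sketch (illustrative): consumers normally initialize the handler
 * with the INIT_IB_EVENT_HANDLER() macro from <rdma/ib_verbs.h>; since the
 * dispatch may run in interrupt context the callback must not sleep.
 * "priv" and my_event_cb() are hypothetical consumer names.
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, device, my_event_cb);
 *	ib_register_event_handler(&priv->event_handler);
 *	// ...
 *	ib_unregister_event_handler(&priv->event_handler);
 */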
1942 */ 1943 int ib_query_port(struct ib_device *device, 1944 u8 port_num, 1945 struct ib_port_attr *port_attr) 1946 { 1947 union ib_gid gid; 1948 int err; 1949 1950 if (!rdma_is_port_valid(device, port_num)) 1951 return -EINVAL; 1952 1953 memset(port_attr, 0, sizeof(*port_attr)); 1954 err = device->ops.query_port(device, port_num, port_attr); 1955 if (err || port_attr->subnet_prefix) 1956 return err; 1957 1958 if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) 1959 return 0; 1960 1961 err = device->ops.query_gid(device, port_num, 0, &gid); 1962 if (err) 1963 return err; 1964 1965 port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix); 1966 return 0; 1967 } 1968 EXPORT_SYMBOL(ib_query_port); 1969 1970 static void add_ndev_hash(struct ib_port_data *pdata) 1971 { 1972 unsigned long flags; 1973 1974 might_sleep(); 1975 1976 spin_lock_irqsave(&ndev_hash_lock, flags); 1977 if (hash_hashed(&pdata->ndev_hash_link)) { 1978 hash_del_rcu(&pdata->ndev_hash_link); 1979 spin_unlock_irqrestore(&ndev_hash_lock, flags); 1980 /* 1981 * We cannot do hash_add_rcu after a hash_del_rcu until the 1982 * grace period 1983 */ 1984 synchronize_rcu(); 1985 spin_lock_irqsave(&ndev_hash_lock, flags); 1986 } 1987 if (pdata->netdev) 1988 hash_add_rcu(ndev_hash, &pdata->ndev_hash_link, 1989 (uintptr_t)pdata->netdev); 1990 spin_unlock_irqrestore(&ndev_hash_lock, flags); 1991 } 1992 1993 /** 1994 * ib_device_set_netdev - Associate the ib_dev with an underlying net_device 1995 * @ib_dev: Device to modify 1996 * @ndev: net_device to affiliate, may be NULL 1997 * @port: IB port the net_device is connected to 1998 * 1999 * Drivers should use this to link the ib_device to a netdev so the netdev 2000 * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be 2001 * affiliated with any port. 2002 * 2003 * The caller must ensure that the given ndev is not unregistered or 2004 * unregistering, and that either the ib_device is unregistered or 2005 * ib_device_set_netdev() is called with NULL when the ndev sends a 2006 * NETDEV_UNREGISTER event. 2007 */ 2008 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, 2009 unsigned int port) 2010 { 2011 struct net_device *old_ndev; 2012 struct ib_port_data *pdata; 2013 unsigned long flags; 2014 int ret; 2015 2016 /* 2017 * Drivers wish to call this before ib_register_driver, so we have to 2018 * setup the port data early. 
2019 */ 2020 ret = alloc_port_data(ib_dev); 2021 if (ret) 2022 return ret; 2023 2024 if (!rdma_is_port_valid(ib_dev, port)) 2025 return -EINVAL; 2026 2027 pdata = &ib_dev->port_data[port]; 2028 spin_lock_irqsave(&pdata->netdev_lock, flags); 2029 old_ndev = rcu_dereference_protected( 2030 pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); 2031 if (old_ndev == ndev) { 2032 spin_unlock_irqrestore(&pdata->netdev_lock, flags); 2033 return 0; 2034 } 2035 2036 if (ndev) 2037 dev_hold(ndev); 2038 rcu_assign_pointer(pdata->netdev, ndev); 2039 spin_unlock_irqrestore(&pdata->netdev_lock, flags); 2040 2041 add_ndev_hash(pdata); 2042 if (old_ndev) 2043 dev_put(old_ndev); 2044 2045 return 0; 2046 } 2047 EXPORT_SYMBOL(ib_device_set_netdev); 2048 2049 static void free_netdevs(struct ib_device *ib_dev) 2050 { 2051 unsigned long flags; 2052 unsigned int port; 2053 2054 if (!ib_dev->port_data) 2055 return; 2056 2057 rdma_for_each_port (ib_dev, port) { 2058 struct ib_port_data *pdata = &ib_dev->port_data[port]; 2059 struct net_device *ndev; 2060 2061 spin_lock_irqsave(&pdata->netdev_lock, flags); 2062 ndev = rcu_dereference_protected( 2063 pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); 2064 if (ndev) { 2065 spin_lock(&ndev_hash_lock); 2066 hash_del_rcu(&pdata->ndev_hash_link); 2067 spin_unlock(&ndev_hash_lock); 2068 2069 /* 2070 * If this is the last dev_put there is still a 2071 * synchronize_rcu before the netdev is kfreed, so we 2072 * can continue to rely on unlocked pointer 2073 * comparisons after the put 2074 */ 2075 rcu_assign_pointer(pdata->netdev, NULL); 2076 dev_put(ndev); 2077 } 2078 spin_unlock_irqrestore(&pdata->netdev_lock, flags); 2079 } 2080 } 2081 2082 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, 2083 unsigned int port) 2084 { 2085 struct ib_port_data *pdata; 2086 struct net_device *res; 2087 2088 if (!rdma_is_port_valid(ib_dev, port)) 2089 return NULL; 2090 2091 pdata = &ib_dev->port_data[port]; 2092 2093 /* 2094 * New drivers should use ib_device_set_netdev() not the legacy 2095 * get_netdev(). 2096 */ 2097 if (ib_dev->ops.get_netdev) 2098 res = ib_dev->ops.get_netdev(ib_dev, port); 2099 else { 2100 spin_lock(&pdata->netdev_lock); 2101 res = rcu_dereference_protected( 2102 pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); 2103 if (res) 2104 dev_hold(res); 2105 spin_unlock(&pdata->netdev_lock); 2106 } 2107 2108 /* 2109 * If we are starting to unregister expedite things by preventing 2110 * propagation of an unregistering netdev. 2111 */ 2112 if (res && res->reg_state != NETREG_REGISTERED) { 2113 dev_put(res); 2114 return NULL; 2115 } 2116 2117 return res; 2118 } 2119 2120 /** 2121 * ib_device_get_by_netdev - Find an IB device associated with a netdev 2122 * @ndev: netdev to locate 2123 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all) 2124 * 2125 * Find and hold an ib_device that is associated with a netdev via 2126 * ib_device_set_netdev(). The caller must call ib_device_put() on the 2127 * returned pointer. 
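 *
 * A brief usage sketch (hypothetical caller):
 *
 *	struct ib_device *ibdev;
 *
 *	ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
 *	if (ibdev) {
 *		pr_debug("%s is bound to %s\n", dev_name(&ibdev->dev),
 *			 ndev->name);
 *		ib_device_put(ibdev);
 *	}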
2128 */ 2129 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, 2130 enum rdma_driver_id driver_id) 2131 { 2132 struct ib_device *res = NULL; 2133 struct ib_port_data *cur; 2134 2135 rcu_read_lock(); 2136 hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link, 2137 (uintptr_t)ndev) { 2138 if (rcu_access_pointer(cur->netdev) == ndev && 2139 (driver_id == RDMA_DRIVER_UNKNOWN || 2140 cur->ib_dev->ops.driver_id == driver_id) && 2141 ib_device_try_get(cur->ib_dev)) { 2142 res = cur->ib_dev; 2143 break; 2144 } 2145 } 2146 rcu_read_unlock(); 2147 2148 return res; 2149 } 2150 EXPORT_SYMBOL(ib_device_get_by_netdev); 2151 2152 /** 2153 * ib_enum_roce_netdev - enumerate all RoCE ports 2154 * @ib_dev: IB device we want to query 2155 * @filter: Should we call the callback? 2156 * @filter_cookie: Cookie passed to filter 2157 * @cb: Callback to call for each found RoCE port 2158 * @cookie: Cookie passed back to the callback 2159 * 2160 * Enumerates all of the physical RoCE ports of ib_dev 2161 * which are related to a netdevice and calls the callback on each 2162 * port for which filter() returns a non-zero value. 2163 */ 2164 void ib_enum_roce_netdev(struct ib_device *ib_dev, 2165 roce_netdev_filter filter, 2166 void *filter_cookie, 2167 roce_netdev_callback cb, 2168 void *cookie) 2169 { 2170 unsigned int port; 2171 2172 rdma_for_each_port (ib_dev, port) 2173 if (rdma_protocol_roce(ib_dev, port)) { 2174 struct net_device *idev = 2175 ib_device_get_netdev(ib_dev, port); 2176 2177 if (filter(ib_dev, port, idev, filter_cookie)) 2178 cb(ib_dev, port, idev, cookie); 2179 2180 if (idev) 2181 dev_put(idev); 2182 } 2183 } 2184 2185 /** 2186 * ib_enum_all_roce_netdevs - enumerate all RoCE devices 2187 * @filter: Should we call the callback? 2188 * @filter_cookie: Cookie passed to filter 2189 * @cb: Callback to call for each found RoCE port 2190 * @cookie: Cookie passed back to the callback 2191 * 2192 * Enumerates all RoCE devices' physical ports which are related 2193 * to a netdevice and calls the callback on each port for which 2194 * filter() returns a non-zero value. 2195 */ 2196 void ib_enum_all_roce_netdevs(roce_netdev_filter filter, 2197 void *filter_cookie, 2198 roce_netdev_callback cb, 2199 void *cookie) 2200 { 2201 struct ib_device *dev; 2202 unsigned long index; 2203 2204 down_read(&devices_rwsem); 2205 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) 2206 ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie); 2207 up_read(&devices_rwsem); 2208 } 2209 2210 /** 2211 * ib_enum_all_devs - enumerate all ib_devices 2212 * @nldev_cb: Callback to call for each found ib_device 2213 * 2214 * Enumerates all registered ib_devices and calls nldev_cb() on each device. 2215 */ 2216 int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, 2217 struct netlink_callback *cb) 2218 { 2219 unsigned long index; 2220 struct ib_device *dev; 2221 unsigned int idx = 0; 2222 int ret = 0; 2223 2224 down_read(&devices_rwsem); 2225 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { 2226 if (!rdma_dev_access_netns(dev, sock_net(skb->sk))) 2227 continue; 2228 2229 ret = nldev_cb(dev, skb, cb, idx); 2230 if (ret) 2231 break; 2232 idx++; 2233 } 2234 up_read(&devices_rwsem); 2235 return ret; 2236 } 2237 2238 /** 2239 * ib_query_pkey - Get P_Key table entry 2240 * @device:Device to query 2241 * @port_num:Port number to query 2242 * @index:P_Key table index to query 2243 * @pkey:Returned P_Key 2244 * 2245 * ib_query_pkey() fetches the specified P_Key table entry.
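 *
 * A minimal usage sketch (hypothetical caller; index 0 normally holds a
 * valid P_Key on IB ports):
 *
 *	u16 pkey;
 *
 *	if (!ib_query_pkey(device, port_num, 0, &pkey))
 *		pr_debug("P_Key[0] = 0x%04x\n", pkey);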
2246 */ 2247 int ib_query_pkey(struct ib_device *device, 2248 u8 port_num, u16 index, u16 *pkey) 2249 { 2250 if (!rdma_is_port_valid(device, port_num)) 2251 return -EINVAL; 2252 2253 return device->ops.query_pkey(device, port_num, index, pkey); 2254 } 2255 EXPORT_SYMBOL(ib_query_pkey); 2256 2257 /** 2258 * ib_modify_device - Change IB device attributes 2259 * @device:Device to modify 2260 * @device_modify_mask:Mask of attributes to change 2261 * @device_modify:New attribute values 2262 * 2263 * ib_modify_device() changes a device's attributes as specified by 2264 * the @device_modify_mask and @device_modify structure. 2265 */ 2266 int ib_modify_device(struct ib_device *device, 2267 int device_modify_mask, 2268 struct ib_device_modify *device_modify) 2269 { 2270 if (!device->ops.modify_device) 2271 return -ENOSYS; 2272 2273 return device->ops.modify_device(device, device_modify_mask, 2274 device_modify); 2275 } 2276 EXPORT_SYMBOL(ib_modify_device); 2277 2278 /** 2279 * ib_modify_port - Modifies the attributes for the specified port. 2280 * @device: The device to modify. 2281 * @port_num: The number of the port to modify. 2282 * @port_modify_mask: Mask used to specify which attributes of the port 2283 * to change. 2284 * @port_modify: New attribute values for the port. 2285 * 2286 * ib_modify_port() changes a port's attributes as specified by the 2287 * @port_modify_mask and @port_modify structure. 2288 */ 2289 int ib_modify_port(struct ib_device *device, 2290 u8 port_num, int port_modify_mask, 2291 struct ib_port_modify *port_modify) 2292 { 2293 int rc; 2294 2295 if (!rdma_is_port_valid(device, port_num)) 2296 return -EINVAL; 2297 2298 if (device->ops.modify_port) 2299 rc = device->ops.modify_port(device, port_num, 2300 port_modify_mask, 2301 port_modify); 2302 else 2303 rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS; 2304 return rc; 2305 } 2306 EXPORT_SYMBOL(ib_modify_port); 2307 2308 /** 2309 * ib_find_gid - Returns the port number and GID table index where 2310 * a specified GID value occurs. It searches only IB link-layer ports. 2311 * @device: The device to query. 2312 * @gid: The GID value to search for. 2313 * @port_num: The port number of the device where the GID value was found. 2314 * @index: The index into the GID table where the GID was found. This 2315 * parameter may be NULL. 2316 */ 2317 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2318 u8 *port_num, u16 *index) 2319 { 2320 union ib_gid tmp_gid; 2321 unsigned int port; 2322 int ret, i; 2323 2324 rdma_for_each_port (device, port) { 2325 if (!rdma_protocol_ib(device, port)) 2326 continue; 2327 2328 for (i = 0; i < device->port_data[port].immutable.gid_tbl_len; 2329 ++i) { 2330 ret = rdma_query_gid(device, port, i, &tmp_gid); 2331 if (ret) 2332 return ret; 2333 if (!memcmp(&tmp_gid, gid, sizeof *gid)) { 2334 *port_num = port; 2335 if (index) 2336 *index = i; 2337 return 0; 2338 } 2339 } 2340 } 2341 2342 return -ENOENT; 2343 } 2344 EXPORT_SYMBOL(ib_find_gid); 2345 2346 /** 2347 * ib_find_pkey - Returns the PKey table index where a specified 2348 * PKey value occurs. 2349 * @device: The device to query. 2350 * @port_num: The port number of the device to search for the PKey. 2351 * @pkey: The PKey value to search for. 2352 * @index: The index into the PKey table where the PKey was found.
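 *
 * A short usage sketch (hypothetical caller looking up the default P_Key
 * 0xffff; a full-member match is preferred over a limited-member one):
 *
 *	u16 index;
 *
 *	if (!ib_find_pkey(device, port_num, 0xffff, &index))
 *		pr_debug("default P_Key found at table index %u\n", index);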
2353 */ 2354 int ib_find_pkey(struct ib_device *device, 2355 u8 port_num, u16 pkey, u16 *index) 2356 { 2357 int ret, i; 2358 u16 tmp_pkey; 2359 int partial_ix = -1; 2360 2361 for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len; 2362 ++i) { 2363 ret = ib_query_pkey(device, port_num, i, &tmp_pkey); 2364 if (ret) 2365 return ret; 2366 if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) { 2367 /* if there is full-member pkey take it.*/ 2368 if (tmp_pkey & 0x8000) { 2369 *index = i; 2370 return 0; 2371 } 2372 if (partial_ix < 0) 2373 partial_ix = i; 2374 } 2375 } 2376 2377 /*no full-member, if exists take the limited*/ 2378 if (partial_ix >= 0) { 2379 *index = partial_ix; 2380 return 0; 2381 } 2382 return -ENOENT; 2383 } 2384 EXPORT_SYMBOL(ib_find_pkey); 2385 2386 /** 2387 * ib_get_net_dev_by_params() - Return the appropriate net_dev 2388 * for a received CM request 2389 * @dev: An RDMA device on which the request has been received. 2390 * @port: Port number on the RDMA device. 2391 * @pkey: The Pkey the request came on. 2392 * @gid: A GID that the net_dev uses to communicate. 2393 * @addr: Contains the IP address that the request specified as its 2394 * destination. 2395 * 2396 */ 2397 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, 2398 u8 port, 2399 u16 pkey, 2400 const union ib_gid *gid, 2401 const struct sockaddr *addr) 2402 { 2403 struct net_device *net_dev = NULL; 2404 unsigned long index; 2405 void *client_data; 2406 2407 if (!rdma_protocol_ib(dev, port)) 2408 return NULL; 2409 2410 /* 2411 * Holding the read side guarantees that the client will not become 2412 * unregistered while we are calling get_net_dev_by_params() 2413 */ 2414 down_read(&dev->client_data_rwsem); 2415 xan_for_each_marked (&dev->client_data, index, client_data, 2416 CLIENT_DATA_REGISTERED) { 2417 struct ib_client *client = xa_load(&clients, index); 2418 2419 if (!client || !client->get_net_dev_by_params) 2420 continue; 2421 2422 net_dev = client->get_net_dev_by_params(dev, port, pkey, gid, 2423 addr, client_data); 2424 if (net_dev) 2425 break; 2426 } 2427 up_read(&dev->client_data_rwsem); 2428 2429 return net_dev; 2430 } 2431 EXPORT_SYMBOL(ib_get_net_dev_by_params); 2432 2433 void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) 2434 { 2435 struct ib_device_ops *dev_ops = &dev->ops; 2436 #define SET_DEVICE_OP(ptr, name) \ 2437 do { \ 2438 if (ops->name) \ 2439 if (!((ptr)->name)) \ 2440 (ptr)->name = ops->name; \ 2441 } while (0) 2442 2443 #define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name) 2444 2445 if (ops->driver_id != RDMA_DRIVER_UNKNOWN) { 2446 WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN && 2447 dev_ops->driver_id != ops->driver_id); 2448 dev_ops->driver_id = ops->driver_id; 2449 } 2450 if (ops->owner) { 2451 WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner); 2452 dev_ops->owner = ops->owner; 2453 } 2454 if (ops->uverbs_abi_ver) 2455 dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver; 2456 2457 dev_ops->uverbs_no_driver_id_binding |= 2458 ops->uverbs_no_driver_id_binding; 2459 2460 SET_DEVICE_OP(dev_ops, add_gid); 2461 SET_DEVICE_OP(dev_ops, advise_mr); 2462 SET_DEVICE_OP(dev_ops, alloc_dm); 2463 SET_DEVICE_OP(dev_ops, alloc_fmr); 2464 SET_DEVICE_OP(dev_ops, alloc_hw_stats); 2465 SET_DEVICE_OP(dev_ops, alloc_mr); 2466 SET_DEVICE_OP(dev_ops, alloc_mr_integrity); 2467 SET_DEVICE_OP(dev_ops, alloc_mw); 2468 SET_DEVICE_OP(dev_ops, alloc_pd); 2469 SET_DEVICE_OP(dev_ops, alloc_rdma_netdev); 2470 SET_DEVICE_OP(dev_ops, alloc_ucontext); 2471 
SET_DEVICE_OP(dev_ops, alloc_xrcd); 2472 SET_DEVICE_OP(dev_ops, attach_mcast); 2473 SET_DEVICE_OP(dev_ops, check_mr_status); 2474 SET_DEVICE_OP(dev_ops, counter_alloc_stats); 2475 SET_DEVICE_OP(dev_ops, counter_bind_qp); 2476 SET_DEVICE_OP(dev_ops, counter_dealloc); 2477 SET_DEVICE_OP(dev_ops, counter_unbind_qp); 2478 SET_DEVICE_OP(dev_ops, counter_update_stats); 2479 SET_DEVICE_OP(dev_ops, create_ah); 2480 SET_DEVICE_OP(dev_ops, create_counters); 2481 SET_DEVICE_OP(dev_ops, create_cq); 2482 SET_DEVICE_OP(dev_ops, create_flow); 2483 SET_DEVICE_OP(dev_ops, create_flow_action_esp); 2484 SET_DEVICE_OP(dev_ops, create_qp); 2485 SET_DEVICE_OP(dev_ops, create_rwq_ind_table); 2486 SET_DEVICE_OP(dev_ops, create_srq); 2487 SET_DEVICE_OP(dev_ops, create_wq); 2488 SET_DEVICE_OP(dev_ops, dealloc_dm); 2489 SET_DEVICE_OP(dev_ops, dealloc_driver); 2490 SET_DEVICE_OP(dev_ops, dealloc_fmr); 2491 SET_DEVICE_OP(dev_ops, dealloc_mw); 2492 SET_DEVICE_OP(dev_ops, dealloc_pd); 2493 SET_DEVICE_OP(dev_ops, dealloc_ucontext); 2494 SET_DEVICE_OP(dev_ops, dealloc_xrcd); 2495 SET_DEVICE_OP(dev_ops, del_gid); 2496 SET_DEVICE_OP(dev_ops, dereg_mr); 2497 SET_DEVICE_OP(dev_ops, destroy_ah); 2498 SET_DEVICE_OP(dev_ops, destroy_counters); 2499 SET_DEVICE_OP(dev_ops, destroy_cq); 2500 SET_DEVICE_OP(dev_ops, destroy_flow); 2501 SET_DEVICE_OP(dev_ops, destroy_flow_action); 2502 SET_DEVICE_OP(dev_ops, destroy_qp); 2503 SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table); 2504 SET_DEVICE_OP(dev_ops, destroy_srq); 2505 SET_DEVICE_OP(dev_ops, destroy_wq); 2506 SET_DEVICE_OP(dev_ops, detach_mcast); 2507 SET_DEVICE_OP(dev_ops, disassociate_ucontext); 2508 SET_DEVICE_OP(dev_ops, drain_rq); 2509 SET_DEVICE_OP(dev_ops, drain_sq); 2510 SET_DEVICE_OP(dev_ops, enable_driver); 2511 SET_DEVICE_OP(dev_ops, fill_res_entry); 2512 SET_DEVICE_OP(dev_ops, get_dev_fw_str); 2513 SET_DEVICE_OP(dev_ops, get_dma_mr); 2514 SET_DEVICE_OP(dev_ops, get_hw_stats); 2515 SET_DEVICE_OP(dev_ops, get_link_layer); 2516 SET_DEVICE_OP(dev_ops, get_netdev); 2517 SET_DEVICE_OP(dev_ops, get_port_immutable); 2518 SET_DEVICE_OP(dev_ops, get_vector_affinity); 2519 SET_DEVICE_OP(dev_ops, get_vf_config); 2520 SET_DEVICE_OP(dev_ops, get_vf_stats); 2521 SET_DEVICE_OP(dev_ops, init_port); 2522 SET_DEVICE_OP(dev_ops, iw_accept); 2523 SET_DEVICE_OP(dev_ops, iw_add_ref); 2524 SET_DEVICE_OP(dev_ops, iw_connect); 2525 SET_DEVICE_OP(dev_ops, iw_create_listen); 2526 SET_DEVICE_OP(dev_ops, iw_destroy_listen); 2527 SET_DEVICE_OP(dev_ops, iw_get_qp); 2528 SET_DEVICE_OP(dev_ops, iw_reject); 2529 SET_DEVICE_OP(dev_ops, iw_rem_ref); 2530 SET_DEVICE_OP(dev_ops, map_mr_sg); 2531 SET_DEVICE_OP(dev_ops, map_mr_sg_pi); 2532 SET_DEVICE_OP(dev_ops, map_phys_fmr); 2533 SET_DEVICE_OP(dev_ops, mmap); 2534 SET_DEVICE_OP(dev_ops, modify_ah); 2535 SET_DEVICE_OP(dev_ops, modify_cq); 2536 SET_DEVICE_OP(dev_ops, modify_device); 2537 SET_DEVICE_OP(dev_ops, modify_flow_action_esp); 2538 SET_DEVICE_OP(dev_ops, modify_port); 2539 SET_DEVICE_OP(dev_ops, modify_qp); 2540 SET_DEVICE_OP(dev_ops, modify_srq); 2541 SET_DEVICE_OP(dev_ops, modify_wq); 2542 SET_DEVICE_OP(dev_ops, peek_cq); 2543 SET_DEVICE_OP(dev_ops, poll_cq); 2544 SET_DEVICE_OP(dev_ops, post_recv); 2545 SET_DEVICE_OP(dev_ops, post_send); 2546 SET_DEVICE_OP(dev_ops, post_srq_recv); 2547 SET_DEVICE_OP(dev_ops, process_mad); 2548 SET_DEVICE_OP(dev_ops, query_ah); 2549 SET_DEVICE_OP(dev_ops, query_device); 2550 SET_DEVICE_OP(dev_ops, query_gid); 2551 SET_DEVICE_OP(dev_ops, query_pkey); 2552 SET_DEVICE_OP(dev_ops, query_port); 2553 SET_DEVICE_OP(dev_ops, 
query_qp); 2554 SET_DEVICE_OP(dev_ops, query_srq); 2555 SET_DEVICE_OP(dev_ops, rdma_netdev_get_params); 2556 SET_DEVICE_OP(dev_ops, read_counters); 2557 SET_DEVICE_OP(dev_ops, reg_dm_mr); 2558 SET_DEVICE_OP(dev_ops, reg_user_mr); 2559 SET_DEVICE_OP(dev_ops, req_ncomp_notif); 2560 SET_DEVICE_OP(dev_ops, req_notify_cq); 2561 SET_DEVICE_OP(dev_ops, rereg_user_mr); 2562 SET_DEVICE_OP(dev_ops, resize_cq); 2563 SET_DEVICE_OP(dev_ops, set_vf_guid); 2564 SET_DEVICE_OP(dev_ops, set_vf_link_state); 2565 SET_DEVICE_OP(dev_ops, unmap_fmr); 2566 2567 SET_OBJ_SIZE(dev_ops, ib_ah); 2568 SET_OBJ_SIZE(dev_ops, ib_cq); 2569 SET_OBJ_SIZE(dev_ops, ib_pd); 2570 SET_OBJ_SIZE(dev_ops, ib_srq); 2571 SET_OBJ_SIZE(dev_ops, ib_ucontext); 2572 } 2573 EXPORT_SYMBOL(ib_set_device_ops); 2574 2575 static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { 2576 [RDMA_NL_LS_OP_RESOLVE] = { 2577 .doit = ib_nl_handle_resolve_resp, 2578 .flags = RDMA_NL_ADMIN_PERM, 2579 }, 2580 [RDMA_NL_LS_OP_SET_TIMEOUT] = { 2581 .doit = ib_nl_handle_set_timeout, 2582 .flags = RDMA_NL_ADMIN_PERM, 2583 }, 2584 [RDMA_NL_LS_OP_IP_RESOLVE] = { 2585 .doit = ib_nl_handle_ip_res_resp, 2586 .flags = RDMA_NL_ADMIN_PERM, 2587 }, 2588 }; 2589 2590 static int __init ib_core_init(void) 2591 { 2592 int ret; 2593 2594 ib_wq = alloc_workqueue("infiniband", 0, 0); 2595 if (!ib_wq) 2596 return -ENOMEM; 2597 2598 ib_comp_wq = alloc_workqueue("ib-comp-wq", 2599 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0); 2600 if (!ib_comp_wq) { 2601 ret = -ENOMEM; 2602 goto err; 2603 } 2604 2605 ib_comp_unbound_wq = 2606 alloc_workqueue("ib-comp-unb-wq", 2607 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM | 2608 WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE); 2609 if (!ib_comp_unbound_wq) { 2610 ret = -ENOMEM; 2611 goto err_comp; 2612 } 2613 2614 ret = class_register(&ib_class); 2615 if (ret) { 2616 pr_warn("Couldn't create InfiniBand device class\n"); 2617 goto err_comp_unbound; 2618 } 2619 2620 ret = rdma_nl_init(); 2621 if (ret) { 2622 pr_warn("Couldn't init IB netlink interface: err %d\n", ret); 2623 goto err_sysfs; 2624 } 2625 2626 ret = addr_init(); 2627 if (ret) { 2628 pr_warn("Couldn't init IB address resolution\n"); 2629 goto err_ibnl; 2630 } 2631 2632 ret = ib_mad_init(); 2633 if (ret) { 2634 pr_warn("Couldn't init IB MAD\n"); 2635 goto err_addr; 2636 } 2637 2638 ret = ib_sa_init(); 2639 if (ret) { 2640 pr_warn("Couldn't init SA\n"); 2641 goto err_mad; 2642 } 2643 2644 ret = register_lsm_notifier(&ibdev_lsm_nb); 2645 if (ret) { 2646 pr_warn("Couldn't register LSM notifier. ret %d\n", ret); 2647 goto err_sa; 2648 } 2649 2650 ret = register_pernet_device(&rdma_dev_net_ops); 2651 if (ret) { 2652 pr_warn("Couldn't init compat dev.
ret %d\n", ret); 2653 goto err_compat; 2654 } 2655 2656 nldev_init(); 2657 rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table); 2658 roce_gid_mgmt_init(); 2659 2660 return 0; 2661 2662 err_compat: 2663 unregister_lsm_notifier(&ibdev_lsm_nb); 2664 err_sa: 2665 ib_sa_cleanup(); 2666 err_mad: 2667 ib_mad_cleanup(); 2668 err_addr: 2669 addr_cleanup(); 2670 err_ibnl: 2671 rdma_nl_exit(); 2672 err_sysfs: 2673 class_unregister(&ib_class); 2674 err_comp_unbound: 2675 destroy_workqueue(ib_comp_unbound_wq); 2676 err_comp: 2677 destroy_workqueue(ib_comp_wq); 2678 err: 2679 destroy_workqueue(ib_wq); 2680 return ret; 2681 } 2682 2683 static void __exit ib_core_cleanup(void) 2684 { 2685 roce_gid_mgmt_cleanup(); 2686 nldev_exit(); 2687 rdma_nl_unregister(RDMA_NL_LS); 2688 unregister_pernet_device(&rdma_dev_net_ops); 2689 unregister_lsm_notifier(&ibdev_lsm_nb); 2690 ib_sa_cleanup(); 2691 ib_mad_cleanup(); 2692 addr_cleanup(); 2693 rdma_nl_exit(); 2694 class_unregister(&ib_class); 2695 destroy_workqueue(ib_comp_unbound_wq); 2696 destroy_workqueue(ib_comp_wq); 2697 /* Make sure that any pending umem accounting work is done. */ 2698 destroy_workqueue(ib_wq); 2699 flush_workqueue(system_unbound_wq); 2700 WARN_ON(!xa_empty(&clients)); 2701 WARN_ON(!xa_empty(&devices)); 2702 } 2703 2704 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4); 2705 2706 /* ib_core relies on the netdev stack having registered the net_ns_type_operations 2707 * kobject namespace type before ib_core initialization runs. 2708 */ 2709 fs_initcall(ib_core_init); 2710 module_exit(ib_core_cleanup); 2711
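/*
 * Usage sketch for the asynchronous event handler API exported above
 * (hypothetical consumer, not part of this file). A handler is initialized
 * with INIT_IB_EVENT_HANDLER(), registered against a device, and may be
 * invoked from interrupt context by ib_dispatch_event():
 *
 *	static void demo_event_handler(struct ib_event_handler *handler,
 *				       struct ib_event *event)
 *	{
 *		pr_debug("async event %d on %s\n", event->event,
 *			 dev_name(&event->device->dev));
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&handler, device, demo_event_handler);
 *	ib_register_event_handler(&handler);
 *	...
 *	ib_unregister_event_handler(&handler);
 */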