Lines matching full:device in drivers/infiniband/core/device.c (Linux RDMA core)

81  * The devices_rwsem also protects the device name list; any change or
82 * assignment of a device name must also hold the write side to guarantee unique
89 * status need to call ib_device_try_get() on the device to ensure it is
130 "Share device among net namespaces; default=1 (shared)");
132 * rdma_dev_access_netns() - Return whether an rdma device can be accessed
134 * @dev: Pointer to rdma device which needs to be checked
137 * When the rdma device is in shared mode, it ignores the net namespace.
138 * When the rdma device is exclusive to a net namespace, rdma device net
188 static void __ib_unregister_device(struct ib_device *device);
241 static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
250 static void ib_device_check_mandatory(struct ib_device *device) in ib_device_check_mandatory() argument
277 device->kverbs_provider = true; in ib_device_check_mandatory()
279 if (!*(void **) ((void *) &device->ops + in ib_device_check_mandatory()
281 device->kverbs_provider = false; in ib_device_check_mandatory()
288 * Caller must perform ib_device_put() to drop the device reference count
289 * when ib_device_get_by_index() returns a valid device pointer.
293 struct ib_device *device; in ib_device_get_by_index() local
296 device = xa_load(&devices, index); in ib_device_get_by_index()
297 if (device) { in ib_device_get_by_index()
298 if (!rdma_dev_access_netns(device, net)) { in ib_device_get_by_index()
299 device = NULL; in ib_device_get_by_index()
303 if (!ib_device_try_get(device)) in ib_device_get_by_index()
304 device = NULL; in ib_device_get_by_index()
308 return device; in ib_device_get_by_index()
312 * ib_device_put - Release IB device reference
313 * @device: device whose reference is to be released
315 * ib_device_put() releases a reference to the IB device to allow it to be
318 void ib_device_put(struct ib_device *device) in ib_device_put() argument
320 if (refcount_dec_and_test(&device->refcount)) in ib_device_put()
321 complete(&device->unreg_completion); in ib_device_put()
327 struct ib_device *device; in __ib_device_get_by_name() local
330 xa_for_each (&devices, index, device) in __ib_device_get_by_name()
331 if (!strcmp(name, dev_name(&device->dev))) in __ib_device_get_by_name()
332 return device; in __ib_device_get_by_name()
338 * ib_device_get_by_name - Find an IB device by name
348 struct ib_device *device; in ib_device_get_by_name() local
351 device = __ib_device_get_by_name(name); in ib_device_get_by_name()
352 if (device && driver_id != RDMA_DRIVER_UNKNOWN && in ib_device_get_by_name()
353 device->ops.driver_id != driver_id) in ib_device_get_by_name()
354 device = NULL; in ib_device_get_by_name()
356 if (device) { in ib_device_get_by_name()
357 if (!ib_device_try_get(device)) in ib_device_get_by_name()
358 device = NULL; in ib_device_get_by_name()
361 return device; in ib_device_get_by_name()
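
Both lookup paths above hand back a pointer only after ib_device_try_get()
succeeds, so the caller owns a reference that it must drop with
ib_device_put(). A minimal caller-side sketch (the "mlx5_0" name is purely
illustrative; RDMA_DRIVER_UNKNOWN acts as a wildcard, as in the code above):

#include <rdma/ib_verbs.h>

static void example_lookup(void)
{
	struct ib_device *dev;

	/* A specific driver id filters; RDMA_DRIVER_UNKNOWN matches any. */
	dev = ib_device_get_by_name("mlx5_0", RDMA_DRIVER_UNKNOWN);
	if (!dev)
		return;		/* absent, wrong driver, or mid-unregistration */

	/* ... use dev; the held reference fences final unregistration ... */

	ib_device_put(dev);	/* release the reference the lookup took */
}
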
365 static int rename_compat_devs(struct ib_device *device) in rename_compat_devs() argument
371 mutex_lock(&device->compat_devs_mutex); in rename_compat_devs()
372 xa_for_each (&device->compat_devs, index, cdev) { in rename_compat_devs()
373 ret = device_rename(&cdev->dev, dev_name(&device->dev)); in rename_compat_devs()
377 dev_name(&device->dev)); in rename_compat_devs()
381 mutex_unlock(&device->compat_devs_mutex); in rename_compat_devs()
439 struct ib_device *device; in alloc_name() local
447 xa_for_each (&devices, index, device) { in alloc_name()
450 if (sscanf(dev_name(&device->dev), name, &i) != 1) in alloc_name()
455 if (strcmp(buf, dev_name(&device->dev)) != 0) in alloc_name()
473 static void ib_device_release(struct device *device) in ib_device_release() argument
475 struct ib_device *dev = container_of(device, struct ib_device, dev); in ib_device_release()
499 static int ib_device_uevent(const struct device *device, in ib_device_uevent() argument
502 if (add_uevent_var(env, "NAME=%s", dev_name(device))) in ib_device_uevent()
512 static const void *net_namespace(const struct device *d) in net_namespace()
534 * of union of ib_core_device and device. in rdma_init_coredev()
537 * device will break this assumption. in rdma_init_coredev()
558 * _ib_alloc_device - allocate an IB device struct
569 struct ib_device *device; in _ib_alloc_device() local
575 device = kzalloc(size, GFP_KERNEL); in _ib_alloc_device()
576 if (!device) in _ib_alloc_device()
579 if (rdma_restrack_init(device)) { in _ib_alloc_device()
580 kfree(device); in _ib_alloc_device()
584 rdma_init_coredev(&device->coredev, device, &init_net); in _ib_alloc_device()
586 INIT_LIST_HEAD(&device->event_handler_list); in _ib_alloc_device()
587 spin_lock_init(&device->qp_open_list_lock); in _ib_alloc_device()
588 init_rwsem(&device->event_handler_rwsem); in _ib_alloc_device()
589 mutex_init(&device->unregistration_lock); in _ib_alloc_device()
594 xa_init_flags(&device->client_data, XA_FLAGS_ALLOC); in _ib_alloc_device()
595 init_rwsem(&device->client_data_rwsem); in _ib_alloc_device()
596 xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC); in _ib_alloc_device()
597 mutex_init(&device->compat_devs_mutex); in _ib_alloc_device()
598 init_completion(&device->unreg_completion); in _ib_alloc_device()
599 INIT_WORK(&device->unregistration_work, ib_unregister_work); in _ib_alloc_device()
601 spin_lock_init(&device->cq_pools_lock); in _ib_alloc_device()
602 for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++) in _ib_alloc_device()
603 INIT_LIST_HEAD(&device->cq_pools[i]); in _ib_alloc_device()
605 rwlock_init(&device->cache_lock); in _ib_alloc_device()
607 device->uverbs_cmd_mask = in _ib_alloc_device()
639 mutex_init(&device->subdev_lock); in _ib_alloc_device()
640 INIT_LIST_HEAD(&device->subdev_list_head); in _ib_alloc_device()
641 INIT_LIST_HEAD(&device->subdev_list); in _ib_alloc_device()
643 return device; in _ib_alloc_device()
648 * ib_dealloc_device - free an IB device struct
649 * @device: structure to free
653 void ib_dealloc_device(struct ib_device *device) in ib_dealloc_device() argument
655 if (device->ops.dealloc_driver) in ib_dealloc_device()
656 device->ops.dealloc_driver(device); in ib_dealloc_device()
665 if (xa_load(&devices, device->index) == device) in ib_dealloc_device()
666 xa_erase(&devices, device->index); in ib_dealloc_device()
670 free_netdevs(device); in ib_dealloc_device()
672 WARN_ON(!xa_empty(&device->compat_devs)); in ib_dealloc_device()
673 WARN_ON(!xa_empty(&device->client_data)); in ib_dealloc_device()
674 WARN_ON(refcount_read(&device->refcount)); in ib_dealloc_device()
675 rdma_restrack_clean(device); in ib_dealloc_device()
677 put_device(&device->dev); in ib_dealloc_device()
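
_ib_alloc_device() and ib_dealloc_device() bracket a device's lifetime. A
hedged sketch of the usual driver pattern, assuming a hypothetical "my_dev"
driver struct; ib_alloc_device() is the wrapper macro that sizes the
allocation for a driver struct embedding ib_device as its first member:

#include <rdma/ib_verbs.h>

struct my_dev {
	struct ib_device ibdev;	/* must be the first member */
	int private_state;	/* hypothetical driver state */
};

static struct my_dev *my_dev_alloc(void)
{
	/* Allocates sizeof(struct my_dev) and initializes the core parts. */
	return ib_alloc_device(my_dev, ibdev);
}

static void my_dev_free(struct my_dev *mdev)
{
	/* Runs the driver's dealloc_driver op (if set), then puts the dev. */
	ib_dealloc_device(&mdev->ibdev);
}
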
683 * parallel calls on the same device - registration/unregistration of both the
684 * device and client can be occurring in parallel.
689 static int add_client_context(struct ib_device *device, in add_client_context() argument
694 if (!device->kverbs_provider && !client->no_kverbs_req) in add_client_context()
697 down_write(&device->client_data_rwsem); in add_client_context()
699 * So long as the client is registered hold both the client and device in add_client_context()
704 refcount_inc(&device->refcount); in add_client_context()
710 if (xa_get_mark(&device->client_data, client->client_id, in add_client_context()
714 ret = xa_err(xa_store(&device->client_data, client->client_id, NULL, in add_client_context()
718 downgrade_write(&device->client_data_rwsem); in add_client_context()
720 if (client->add(device)) { in add_client_context()
726 xa_erase(&device->client_data, client->client_id); in add_client_context()
727 up_read(&device->client_data_rwsem); in add_client_context()
728 ib_device_put(device); in add_client_context()
735 xa_set_mark(&device->client_data, client->client_id, in add_client_context()
737 up_read(&device->client_data_rwsem); in add_client_context()
741 ib_device_put(device); in add_client_context()
744 up_write(&device->client_data_rwsem); in add_client_context()
748 static void remove_client_context(struct ib_device *device, in remove_client_context() argument
754 down_write(&device->client_data_rwsem); in remove_client_context()
755 if (!xa_get_mark(&device->client_data, client_id, in remove_client_context()
757 up_write(&device->client_data_rwsem); in remove_client_context()
760 client_data = xa_load(&device->client_data, client_id); in remove_client_context()
761 xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED); in remove_client_context()
763 up_write(&device->client_data_rwsem); in remove_client_context()
775 client->remove(device, client_data); in remove_client_context()
777 xa_erase(&device->client_data, client_id); in remove_client_context()
778 ib_device_put(device); in remove_client_context()
782 static int alloc_port_data(struct ib_device *device) in alloc_port_data() argument
787 if (device->port_data) in alloc_port_data()
791 if (WARN_ON(!device->phys_port_cnt)) in alloc_port_data()
795 if (WARN_ON(device->phys_port_cnt == U32_MAX)) in alloc_port_data()
799 * device->port_data is indexed directly by the port number to make in alloc_port_data()
806 size_add(rdma_end_port(device), 1)), in alloc_port_data()
815 device->port_data = pdata_rcu->pdata; in alloc_port_data()
817 rdma_for_each_port (device, port) { in alloc_port_data()
818 struct ib_port_data *pdata = &device->port_data[port]; in alloc_port_data()
820 pdata->ib_dev = device; in alloc_port_data()
835 static int setup_port_data(struct ib_device *device) in setup_port_data() argument
840 ret = alloc_port_data(device); in setup_port_data()
844 rdma_for_each_port (device, port) { in setup_port_data()
845 struct ib_port_data *pdata = &device->port_data[port]; in setup_port_data()
847 ret = device->ops.get_port_immutable(device, port, in setup_port_data()
852 if (verify_immutable(device, port)) in setup_port_data()
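
setup_port_data() calls the driver's get_port_immutable op once per port and
then verifies the result. A hedged sketch of such an op, with field names
from struct ib_port_immutable but illustrative values (a real driver derives
them from its hardware):

#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>

static int my_get_port_immutable(struct ib_device *ibdev, u32 port_num,
				 struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;	/* example */
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}
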
860 * @dev: IB device
911 static void compatdev_release(struct device *dev) in compatdev_release()
919 static int add_one_compat_dev(struct ib_device *device, in add_one_compat_dev() argument
930 * Create and add compat device in all namespaces other than where it in add_one_compat_dev()
934 read_pnet(&device->coredev.rdma_net))) in add_one_compat_dev()
939 * compat_devs_mutex wins and gets to add the device. Others will wait in add_one_compat_dev()
942 mutex_lock(&device->compat_devs_mutex); in add_one_compat_dev()
943 cdev = xa_load(&device->compat_devs, rnet->id); in add_one_compat_dev()
948 ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL); in add_one_compat_dev()
958 cdev->dev.parent = device->dev.parent; in add_one_compat_dev()
959 rdma_init_coredev(cdev, device, read_pnet(&rnet->net)); in add_one_compat_dev()
961 ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev)); in add_one_compat_dev()
972 ret = xa_err(xa_store(&device->compat_devs, rnet->id, in add_one_compat_dev()
977 mutex_unlock(&device->compat_devs_mutex); in add_one_compat_dev()
987 xa_release(&device->compat_devs, rnet->id); in add_one_compat_dev()
989 mutex_unlock(&device->compat_devs_mutex); in add_one_compat_dev()
993 static void remove_one_compat_dev(struct ib_device *device, u32 id) in remove_one_compat_dev() argument
997 mutex_lock(&device->compat_devs_mutex); in remove_one_compat_dev()
998 cdev = xa_erase(&device->compat_devs, id); in remove_one_compat_dev()
999 mutex_unlock(&device->compat_devs_mutex); in remove_one_compat_dev()
1007 static void remove_compat_devs(struct ib_device *device) in remove_compat_devs() argument
1012 xa_for_each (&device->compat_devs, index, cdev) in remove_compat_devs()
1013 remove_one_compat_dev(device, index); in remove_compat_devs()
1016 static int add_compat_devs(struct ib_device *device) in add_compat_devs() argument
1026 ret = add_one_compat_dev(device, rnet); in add_compat_devs()
1142 * If the real device is in the NS then move it back to init. in rdma_dev_exit_net()
1181 * system configuration for device sharing mode. in rdma_dev_init_net()
1198 * Assign the unique string device name and the unique device index. This is
1201 static int assign_name(struct ib_device *device, const char *name) in assign_name() argument
1207 /* Assign a unique name to the device */ in assign_name()
1209 ret = alloc_name(device, name); in assign_name()
1211 ret = dev_set_name(&device->dev, name); in assign_name()
1215 if (__ib_device_get_by_name(dev_name(&device->dev))) { in assign_name()
1219 strscpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX); in assign_name()
1221 ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b, in assign_name()
1233 * device ops, this is the only reason these actions are not done during
1236 static int setup_device(struct ib_device *device) in setup_device() argument
1241 ib_device_check_mandatory(device); in setup_device()
1243 ret = setup_port_data(device); in setup_device()
1245 dev_warn(&device->dev, "Couldn't create per-port data\n"); in setup_device()
1249 memset(&device->attrs, 0, sizeof(device->attrs)); in setup_device()
1250 ret = device->ops.query_device(device, &device->attrs, &uhw); in setup_device()
1252 dev_warn(&device->dev, in setup_device()
1253 "Couldn't query the device attributes\n"); in setup_device()
1260 static void disable_device(struct ib_device *device) in disable_device() argument
1264 WARN_ON(!refcount_read(&device->refcount)); in disable_device()
1267 xa_clear_mark(&devices, device->index, DEVICE_REGISTERED); in disable_device()
1281 remove_client_context(device, cid); in disable_device()
1284 ib_cq_pool_cleanup(device); in disable_device()
1287 ib_device_put(device); in disable_device()
1288 wait_for_completion(&device->unreg_completion); in disable_device()
1291 * compat devices must be removed after device refcount drops to zero. in disable_device()
1293 * devices and before device is disabled. in disable_device()
1295 remove_compat_devs(device); in disable_device()
1299 * An enabled device is visible to all clients and to all the public facing
1300 * APIs that return a device pointer. This always returns with a new get, even
1303 static int enable_device_and_get(struct ib_device *device) in enable_device_and_get() argument
1313 refcount_set(&device->refcount, 2); in enable_device_and_get()
1315 xa_set_mark(&devices, device->index, DEVICE_REGISTERED); in enable_device_and_get()
1323 if (device->ops.enable_driver) { in enable_device_and_get()
1324 ret = device->ops.enable_driver(device); in enable_device_and_get()
1331 ret = add_client_context(device, client); in enable_device_and_get()
1337 ret = add_compat_devs(device); in enable_device_and_get()
1347 static void ib_device_notify_register(struct ib_device *device) in ib_device_notify_register() argument
1355 /* Mark for userspace that device is ready */ in ib_device_notify_register()
1356 kobject_uevent(&device->dev.kobj, KOBJ_ADD); in ib_device_notify_register()
1358 ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT); in ib_device_notify_register()
1362 rdma_for_each_port(device, port) { in ib_device_notify_register()
1363 netdev = ib_device_get_netdev(device, port); in ib_device_notify_register()
1367 ret = rdma_nl_notify_event(device, port, in ib_device_notify_register()
1379 * ib_register_device - Register an IB device with IB core
1380 * @device: Device to register
1381 * @name: unique string device name. This may include a '%' which will
1382 * cause a unique index to be added to the passed device name.
1383 * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB
1384 * device will be used. In this case the caller should fully
1389 * callback for each device that is added. @device must be allocated
1393 * asynchronously then the device pointer may become freed as soon as this
1396 int ib_register_device(struct ib_device *device, const char *name, in ib_register_device() argument
1397 struct device *dma_device) in ib_register_device()
1401 ret = assign_name(device, name); in ib_register_device()
1406 * If the caller does not provide a DMA capable device then the IB core in ib_register_device()
1411 device->dma_device = dma_device; in ib_register_device()
1413 ret = setup_device(device); in ib_register_device()
1417 ret = ib_cache_setup_one(device); in ib_register_device()
1419 dev_warn(&device->dev, in ib_register_device()
1424 device->groups[0] = &ib_dev_attr_group; in ib_register_device()
1425 device->groups[1] = device->ops.device_group; in ib_register_device()
1426 ret = ib_setup_device_attrs(device); in ib_register_device()
1430 ib_device_register_rdmacg(device); in ib_register_device()
1432 rdma_counter_init(device); in ib_register_device()
1436 * is too early and the device is not initialized yet. in ib_register_device()
1438 dev_set_uevent_suppress(&device->dev, true); in ib_register_device()
1439 ret = device_add(&device->dev); in ib_register_device()
1443 ret = ib_setup_port_attrs(&device->coredev); in ib_register_device()
1445 dev_warn(&device->dev, in ib_register_device()
1446 "Couldn't register device with driver model\n"); in ib_register_device()
1450 ret = enable_device_and_get(device); in ib_register_device()
1456 * automatically dealloc the device since the caller is in ib_register_device()
1465 dealloc_fn = device->ops.dealloc_driver; in ib_register_device()
1466 device->ops.dealloc_driver = prevent_dealloc_device; in ib_register_device()
1467 ib_device_put(device); in ib_register_device()
1468 __ib_unregister_device(device); in ib_register_device()
1469 device->ops.dealloc_driver = dealloc_fn; in ib_register_device()
1470 dev_set_uevent_suppress(&device->dev, false); in ib_register_device()
1473 dev_set_uevent_suppress(&device->dev, false); in ib_register_device()
1475 ib_device_notify_register(device); in ib_register_device()
1477 ib_device_put(device); in ib_register_device()
1482 device_del(&device->dev); in ib_register_device()
1484 dev_set_uevent_suppress(&device->dev, false); in ib_register_device()
1485 ib_device_unregister_rdmacg(device); in ib_register_device()
1487 ib_cache_cleanup_one(device); in ib_register_device()
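
Putting the registration path together: a driver allocates the device, sets
its ops and port count, and calls ib_register_device(); a '%' pattern in the
name is resolved to a unique index by assign_name()/alloc_name() above. A
hedged sketch with hypothetical names (the ops shown are a token subset; a
real driver must supply every op checked by ib_device_check_mandatory()):

static const struct ib_device_ops my_ops = {
	.owner = THIS_MODULE,
	/* .driver_id, .query_device, .query_port, .get_port_immutable, ... */
};

static int my_probe(struct device *dma_dev)
{
	struct my_dev *mdev;
	int ret;

	mdev = ib_alloc_device(my_dev, ibdev);
	if (!mdev)
		return -ENOMEM;

	mdev->ibdev.phys_port_cnt = 1;
	ib_set_device_ops(&mdev->ibdev, &my_ops);

	/* "%d" asks the core for a unique suffix, e.g. "mydev0". */
	ret = ib_register_device(&mdev->ibdev, "mydev%d", dma_dev);
	if (ret)
		ib_dealloc_device(&mdev->ibdev);
	return ret;
}
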
1492 /* Callers must hold a get on the device. */
1509 * fully fenced, once any unregister returns the device is truly in __ib_unregister_device()
1543 * ib_unregister_device - Unregister an IB device
1544 * @ib_dev: The device to unregister
1546 * Unregister an IB device. All clients will receive a remove callback.
1565 * ib_unregister_device_and_put - Unregister a device while holding a 'get'
1566 * @ib_dev: The device to unregister
1572 * holding the 'get'. When the function returns the device is fully
1576 * their resources associated with the device and dealloc it.
1592 * This implements a fence for device unregistration. It only returns once all
1596 * If devices are not yet unregistered it goes ahead and starts unregistering
1635 * ib_unregister_device_queued - Unregister a device using a work queue
1636 * @ib_dev: The device to unregister
1638 * This schedules an asynchronous unregistration using a WQ for the device. A
1656 * The caller must pass in a device that has the kref held and the refcount
1657 * released. If the device is in cur_net and still registered then it is moved
1660 static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net, in rdma_dev_change_netns() argument
1666 mutex_lock(&device->unregistration_lock); in rdma_dev_change_netns()
1669 * If a device not under ib_device_get() or if the unregistration_lock in rdma_dev_change_netns()
1673 if (refcount_read(&device->refcount) == 0 || in rdma_dev_change_netns()
1674 !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) { in rdma_dev_change_netns()
1679 kobject_uevent(&device->dev.kobj, KOBJ_REMOVE); in rdma_dev_change_netns()
1680 disable_device(device); in rdma_dev_change_netns()
1683 * At this point no one can be using the device, so it is safe to in rdma_dev_change_netns()
1686 write_pnet(&device->coredev.rdma_net, net); in rdma_dev_change_netns()
1690 * Currently rdma devices are system wide unique. So the device name in rdma_dev_change_netns()
1694 ret = device_rename(&device->dev, dev_name(&device->dev)); in rdma_dev_change_netns()
1697 dev_warn(&device->dev, in rdma_dev_change_netns()
1698 "%s: Couldn't rename device after namespace change\n", in rdma_dev_change_netns()
1700 /* Try and put things back and re-enable the device */ in rdma_dev_change_netns()
1701 write_pnet(&device->coredev.rdma_net, cur_net); in rdma_dev_change_netns()
1704 ret2 = enable_device_and_get(device); in rdma_dev_change_netns()
1708 * retry at later point. So don't disable the device. in rdma_dev_change_netns()
1710 dev_warn(&device->dev, in rdma_dev_change_netns()
1711 "%s: Couldn't re-enable device after namespace change\n", in rdma_dev_change_netns()
1714 kobject_uevent(&device->dev.kobj, KOBJ_ADD); in rdma_dev_change_netns()
1716 ib_device_put(device); in rdma_dev_change_netns()
1718 mutex_unlock(&device->unregistration_lock); in rdma_dev_change_netns()
1808 * register callbacks for IB device addition and removal. When an IB
1809 * device is added, each registered client's add method will be called
1810 * (in the order the clients were registered), and when a device is
1818 struct ib_device *device; in ib_register_client() local
1837 xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) { in ib_register_client()
1838 ret = add_client_context(device, client); in ib_register_client()
1858 * will receive a remove callback for each IB device still registered.
1865 struct ib_device *device; in ib_unregister_client() local
1875 xa_for_each (&devices, index, device) { in ib_unregister_client()
1876 if (!ib_device_try_get(device)) in ib_unregister_client()
1880 remove_client_context(device, client->client_id); in ib_unregister_client()
1882 ib_device_put(device); in ib_unregister_client()
1961 * @ibdev: IB device
1996 * @device: Device to set context for
2002 * registered to the device, once the ib_client remove() callback returns this
2005 void ib_set_client_data(struct ib_device *device, struct ib_client *client, in ib_set_client_data() argument
2013 rc = xa_store(&device->client_data, client->client_id, data, in ib_set_client_data()
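
The client half of the lifecycle managed by add_client_context() and
remove_client_context(): a client supplies add/remove callbacks and parks
per-device state with ib_set_client_data(); the core hands that state back
to remove(). A hedged sketch, all names hypothetical:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct my_state { int opened; };	/* hypothetical per-device state */

static struct ib_client my_client;

static int my_add_one(struct ib_device *ibdev)
{
	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;	/* core skips this device for the client */
	ib_set_client_data(ibdev, &my_client, st);
	return 0;
}

static void my_remove_one(struct ib_device *ibdev, void *client_data)
{
	kfree(client_data);	/* the pointer stored in my_add_one() */
}

static struct ib_client my_client = {
	.name	= "my_client",
	.add	= my_add_one,
	.remove	= my_remove_one,
};

/* Module init/exit call ib_register_client()/ib_unregister_client(). */
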
2030 down_write(&event_handler->device->event_handler_rwsem); in ib_register_event_handler()
2032 &event_handler->device->event_handler_list); in ib_register_event_handler()
2033 up_write(&event_handler->device->event_handler_rwsem); in ib_register_event_handler()
2046 down_write(&event_handler->device->event_handler_rwsem); in ib_unregister_event_handler()
2048 up_write(&event_handler->device->event_handler_rwsem); in ib_unregister_event_handler()
2056 down_read(&event->device->event_handler_rwsem); in ib_dispatch_event_clients()
2058 list_for_each_entry(handler, &event->device->event_handler_list, list) in ib_dispatch_event_clients()
2061 up_read(&event->device->event_handler_rwsem); in ib_dispatch_event_clients()
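
Asynchronous events are fanned out under event_handler_rwsem, so handlers
should not block for long and must stay registered while events may fire. A
hedged sketch using the standard INIT_IB_EVENT_HANDLER() initializer (the
handler body is illustrative only):

static void my_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	/* Runs under the read side of event_handler_rwsem. */
	dev_info(&event->device->dev, "async event %d\n", event->event);
}

static void my_watch_events(struct ib_device *ibdev,
			    struct ib_event_handler *eh)
{
	INIT_IB_EVENT_HANDLER(eh, ibdev, my_event_handler);
	ib_register_event_handler(eh);
	/* ... later: ib_unregister_event_handler(eh); */
}
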
2064 static int iw_query_port(struct ib_device *device, in iw_query_port() argument
2073 netdev = ib_device_get_netdev(device, port_num); in iw_query_port()
2100 return device->ops.query_port(device, port_num, port_attr); in iw_query_port()
2103 static int __ib_query_port(struct ib_device *device, in __ib_query_port() argument
2111 err = device->ops.query_port(device, port_num, port_attr); in __ib_query_port()
2115 if (rdma_port_get_link_layer(device, port_num) != in __ib_query_port()
2119 ib_get_cached_subnet_prefix(device, port_num, in __ib_query_port()
2126 * @device: Device to query
2133 int ib_query_port(struct ib_device *device, in ib_query_port() argument
2137 if (!rdma_is_port_valid(device, port_num)) in ib_query_port()
2140 if (rdma_protocol_iwarp(device, port_num)) in ib_query_port()
2141 return iw_query_port(device, port_num, port_attr); in ib_query_port()
2143 return __ib_query_port(device, port_num, port_attr); in ib_query_port()
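
Callers see a single ib_query_port() even though iWARP ports are answered
from the associated netdev while other link layers go to the driver's
query_port op. A small hedged caller sketch:

static bool my_port_is_active(struct ib_device *ibdev, u32 port_num)
{
	struct ib_port_attr attr;

	if (ib_query_port(ibdev, port_num, &attr))
		return false;
	return attr.state == IB_PORT_ACTIVE;
}
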
2172 * @ib_dev: Device to modify
2221 /* Make sure that the device is registered before we send events */ in ib_device_set_netdev()
2300 * @ibdev: IB device
2301 * @ndev: Network device
2325 * ib_device_get_by_netdev - Find an IB device associated with a netdev
2358 * @ib_dev : IB device we want to query
2366 * device for which the filter() function returns non-zero.
2395 * to netdevices and calls callback() on each device for which
2416 * Enumerates all ib_devices and calls callback() on each device.
2442 * @device: Device to query
2449 int ib_query_pkey(struct ib_device *device, in ib_query_pkey() argument
2452 if (!rdma_is_port_valid(device, port_num)) in ib_query_pkey()
2455 if (!device->ops.query_pkey) in ib_query_pkey()
2458 return device->ops.query_pkey(device, port_num, index, pkey); in ib_query_pkey()
2463 * ib_modify_device - Change IB device attributes
2464 * @device: Device to modify
2468 * ib_modify_device() changes a device's attributes as specified by
2471 int ib_modify_device(struct ib_device *device, in ib_modify_device() argument
2475 if (!device->ops.modify_device) in ib_modify_device()
2478 return device->ops.modify_device(device, device_modify_mask, in ib_modify_device()
2485 * @device: The device to modify.
2494 int ib_modify_port(struct ib_device *device, in ib_modify_port() argument
2500 if (!rdma_is_port_valid(device, port_num)) in ib_modify_port()
2503 if (device->ops.modify_port) in ib_modify_port()
2504 rc = device->ops.modify_port(device, port_num, in ib_modify_port()
2507 else if (rdma_protocol_roce(device, port_num) && in ib_modify_port()
2520 * @device: The device to query.
2522 * @port_num: The port number of the device where the GID value was found.
2526 int ib_find_gid(struct ib_device *device, union ib_gid *gid, in ib_find_gid() argument
2533 rdma_for_each_port (device, port) { in ib_find_gid()
2534 if (!rdma_protocol_ib(device, port)) in ib_find_gid()
2537 for (i = 0; i < device->port_data[port].immutable.gid_tbl_len; in ib_find_gid()
2539 ret = rdma_query_gid(device, port, i, &tmp_gid); in ib_find_gid()
2559 * @device: The device to query.
2560 * @port_num: The port number of the device to search for the PKey.
2564 int ib_find_pkey(struct ib_device *device, in ib_find_pkey() argument
2571 for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len; in ib_find_pkey()
2573 ret = ib_query_pkey(device, port_num, i, &tmp_pkey); in ib_find_pkey()
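
A common use of ib_find_pkey() is resolving the table index of the default
P_Key so it can be programmed into a QP. A hedged sketch (IB_DEFAULT_PKEY_FULL
comes from rdma/ib_mad.h):

#include <rdma/ib_mad.h>

static int my_default_pkey_index(struct ib_device *ibdev, u32 port_num)
{
	u16 index;
	int ret;

	ret = ib_find_pkey(ibdev, port_num, IB_DEFAULT_PKEY_FULL, &index);
	if (ret)
		return ret;	/* typically -ENOENT when not present */
	return index;
}
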
2599 * @dev: An RDMA device on which the request has been received.
2600 * @port: Port number on the RDMA device.
2907 ibevent.device = ibdev; in ib_dispatch_port_state_event()
3004 pr_warn("Couldn't create InfiniBand device class\n"); in ib_core_init()