xref: /linux/drivers/infiniband/core/device.c (revision a8fe58cec351c25e09c393bf46117c0c47b5a17c)
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void *            data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool		  going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock. A special case is when the
 * device_mutex is locked. In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
			       device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}

static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	ib_cache_release_one(dev);
	kfree(dev->port_immutable);
	kfree(dev);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (add_uevent_var(env, "NAME=%s", dev->name))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent = ib_device_uevent,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);

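/*
 * Illustrative sketch only (not taken from an in-tree driver): a low-level
 * driver would typically embed struct ib_device as the first member of its
 * own device structure and size the allocation accordingly.  The names
 * "my_hca" and "regs" below are hypothetical.
 *
 *	struct my_hca {
 *		struct ib_device ibdev;		(must be the first member)
 *		void __iomem	*regs;
 *	};
 *
 *	struct my_hca *hca;
 *
 *	hca = (struct my_hca *)ib_alloc_device(sizeof(*hca));
 *	if (!hca)
 *		return -ENOMEM;
 *	...
 *	ib_dealloc_device(&hca->ibdev);
 */
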
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
		       device->name, client->name);
		return -ENOMEM;
	}

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
			    rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/**
	 * device->port_immutable is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1-based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;
	struct ib_client *client;
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	ret = read_port_immutable(device);
	if (ret) {
		printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
		       device->name);
		goto out;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
		goto out;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->query_device(device, &device->attrs, &uhw);
	if (ret) {
		printk(KERN_WARNING "Couldn't query the device attributes\n");
		goto out;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		printk(KERN_WARNING "Couldn't register device %s with driver model\n",
		       device->name);
		ib_cache_cleanup_one(device);
		goto out;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);

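/*
 * Illustrative sketch only: a hypothetical driver ("myhca") would fill in
 * its name (a "%d" is expanded by alloc_name() above), wire up at least the
 * mandatory methods checked by ib_device_check_mandatory(), and then
 * register.  Error handling and the method implementations are omitted.
 *
 *	strlcpy(hca->ibdev.name, "myhca%d", IB_DEVICE_NAME_MAX);
 *	hca->ibdev.owner	= THIS_MODULE;
 *	hca->ibdev.query_device	= myhca_query_device;
 *	hca->ibdev.query_port	= myhca_query_port;
 *	...	(remaining mandatory methods)
 *
 *	ret = ib_register_device(&hca->ibdev, NULL);
 *	if (ret)
 *		goto err_dealloc;
 *
 * Teardown is the mirror image: ib_unregister_device(&hca->ibdev) followed
 * by ib_dealloc_device(&hca->ibdev).
 */
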
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		context->going_down = true;
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	downgrade_write(&lists_rwsem);

	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	mutex_unlock(&device_mutex);

	ib_device_unregister_sysfs(device);
	ib_cache_cleanup_one(device);

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);

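/*
 * Illustrative sketch only: a minimal client ("my_client") that gets an add
 * callback for every registered device and a remove callback when a device
 * (or the client itself) goes away.  The function and structure names are
 * hypothetical.
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		...
 *	}
 *
 *	static void my_remove_one(struct ib_device *device, void *client_data)
 *	{
 *		...
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name	= "my_client",
 *		.add	= my_add_one,
 *		.remove	= my_remove_one,
 *	};
 *
 *	ret = ib_register_client(&my_client);
 *	...
 *	ib_unregister_client(&my_client);
 */
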
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			pr_warn("No client context found for %s/%s\n",
				device->name, client->name);
			continue;
		}

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_del(&found_context->list);
		kfree(found_context);
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	printk(KERN_WARNING "No client context found for %s/%s\n",
	       device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);

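/*
 * Illustrative sketch only: per-device client state is usually allocated in
 * the add callback, published with ib_set_client_data(), and torn down in
 * the remove callback, which receives the same pointer back.  "my_state",
 * "my_client" and the callback names are hypothetical.
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		struct my_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
 *
 *		if (!s)
 *			return;
 *		ib_set_client_data(device, &my_client, s);
 *	}
 *
 *	static void my_remove_one(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 * Elsewhere, ib_get_client_data(device, &my_client) returns the same
 * pointer (or NULL if it was never set).
 */
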
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);

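/*
 * Illustrative sketch only: event handlers are usually initialized with the
 * INIT_IB_EVENT_HANDLER() macro from <rdma/ib_verbs.h>.  Because the
 * handler may run in interrupt context, it must not sleep.  The names below
 * are hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			...	(no sleeping allowed here)
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, device, my_event_handler);
 *	ib_register_event_handler(&priv->event_handler);
 *	...
 *	ib_unregister_event_handler(&priv->event_handler);
 */
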
/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);

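/*
 * Illustrative sketch only: checking whether a port's link is up.
 *
 *	struct ib_port_attr port_attr;
 *
 *	ret = ib_query_port(device, port_num, &port_attr);
 *	if (!ret && port_attr.state == IB_PORT_ACTIVE)
 *		...	(port is active; its LID is in port_attr.lid)
 */
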
/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr)
{
	if (rdma_cap_roce_gid_table(device, port_num))
		return ib_get_cached_gid(device, port_num, index, gid, attr);

	if (attr)
		return -EINVAL;

	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * that are related to a netdevice and calls cb() on each
 * port for which the filter() function returns a non-zero value.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices that are related
 * to a netdevice and calls cb() on each port for which the filter()
 * function returns a non-zero value.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}

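/*
 * Illustrative sketch only: a filter/callback pair as used with the two
 * enumeration helpers above.  Here the filter matches a specific netdevice
 * passed in via the filter cookie; the "my_" names are hypothetical.
 *
 *	static int my_filter(struct ib_device *device, u8 port,
 *			     struct net_device *idev, void *cookie)
 *	{
 *		return idev == (struct net_device *)cookie;
 *	}
 *
 *	static void my_cb(struct ib_device *device, u8 port,
 *			  struct net_device *idev, void *cookie)
 *	{
 *		...	(called once per matching RoCE port)
 *	}
 *
 *	ib_enum_all_roce_netdevs(my_filter, netdev, my_cb, NULL);
 */
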
/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	if (!device->modify_port)
		return -ENOSYS;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
				   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: Type of GID.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		enum ib_gid_type gid_type, struct net_device *ndev,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (rdma_cap_roce_gid_table(device, port)) {
			if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
							ndev, index)) {
				*port_num = port;
				return 0;
			}
		}

		if (gid_type != IB_GID_TYPE_IB)
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is a full-member pkey, take it */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* no full-member pkey found; if a limited-member one exists, take it */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);

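/*
 * Worked example of the membership matching above: with a P_Key table that
 * holds 0x0001 (limited member) and 0x8001 (full member), a search for
 * 0x8001 matches both entries on the low 15 bits, and the full-member entry
 * wins because bit 15 is set.  A typical call (0xffff is the default
 * full-member P_Key) might look like:
 *
 *	u16 index;
 *
 *	if (!ib_find_pkey(device, port_num, 0xffff, &index))
 *		...	(index now points at the default P_Key)
 */
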
/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:	An RDMA device on which the request has been received.
 * @port:	Port number on the RDMA device.
 * @pkey:	The Pkey the request came on.
 * @gid:	A GID that the net_dev uses to communicate.
 * @addr:	Contains the IP address that the request specified as its
 *		destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
			WQ_UNBOUND_MAX_ACTIVE);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ret = class_register(&ib_class);
	if (ret) {
		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
		goto err_comp;
	}

	ret = ibnl_init();
	if (ret) {
		printk(KERN_WARNING "Couldn't init IB netlink interface\n");
		goto err_sysfs;
	}

	ib_cache_setup();

	return 0;

err_sysfs:
	class_unregister(&ib_class);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	ibnl_cleanup();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);