/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that the device is in the process of destroying the internal
	 * HW resources, protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

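/*
 * The global "mut" protects both IDR tables and the ctx->closing flag;
 * per-file state (ctx_list, event_list) is protected by file->mut.
 */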
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

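/*
 * Look up a context by id and take a reference on it.  Each successful
 * ucma_get_ctx() must be paired with ucma_put_ctx(); the final put
 * completes ctx->comp so the destroy path can wait out in-flight users.
 */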
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all in-flight tasks are finished, we close all underlying
	 * resources. The context is still alive until it is explicitly
	 * destroyed by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only if the context owns this cm_id can it be queued to be
	 * closed. Otherwise the cm_id belongs to an in-flight connect
	 * request that is still on the context's event list, waiting to be
	 * detached and reattached to its new context in ucma_get_event;
	 * that case is handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

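/*
 * rdma_cm event callback: allocate a ucma_event, copy the event data into
 * its user-visible form, and queue it on the owning file's event list for
 * ucma_get_event() to report.
 */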
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

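/*
 * Report the next queued event to userspace, blocking unless O_NONBLOCK
 * is set.  A CONNECT_REQUEST event implicitly allocates a new context for
 * the incoming cm_id before the event is handed to the user.
 */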
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the userspace software.
 * These might include pending connect requests which we have not completed
 * processing.  We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract
 * all relevant events from the context's pending event list while holding
 * the mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no in-flight closing
	 * task.
	 */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct sockaddr *addr;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	addr = (struct sockaddr *) &cmd.addr;
	if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct sockaddr *src, *dst;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	src = (struct sockaddr *) &cmd.src_addr;
	dst = (struct sockaddr *) &cmd.dst_addr;
	if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

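/*
 * route->num_paths == 0 means no path records have been resolved yet, so
 * the GIDs are derived from the device address itself; one or two resolved
 * path records are copied out to userspace directly.
 */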
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
				&resp->path_data[i].path_rec);
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

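/*
 * Note that the qkey is only meaningful for AF_IB addresses; for other
 * address families it is forced to zero below.
 */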
static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

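/*
 * The requested listen backlog is clamped: a value of zero, or anything at
 * or above the max_backlog sysctl (net/rdma_ucm/max_backlog), falls back
 * to max_backlog.
 */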
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

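/*
 * Scan the packed path records for the primary bidirectional GMP path,
 * install it on the cm_id, and synthesize a ROUTE_RESOLVED event so that
 * userspace sees the same sequence as a normal route resolution.
 */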
static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));
	sa_path.vlan_id = 0xffff;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

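/*
 * Common multicast join path: allocate the ucma_multicast bookkeeping,
 * join the group, and report the new multicast id back to userspace.
 * The file mutex is held across the join so that events for the new
 * group cannot be queued before the response is written.
 */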
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
	join_cmd.reserved = 0;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire the mutexes in pointer order to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

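/*
 * Move a CM ID (and any events already queued for it) from the file that
 * currently owns it to the file issuing this command.  Both files are
 * locked so no event can be queued against the old file during the move.
 */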
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

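/*
 * Userspace drives this interface with write(2): each request is a
 * struct rdma_ucm_cmd_hdr followed by hdr.in bytes of command payload,
 * and any response is written through the user pointer carried inside
 * the command itself.  A minimal sketch of a caller (hypothetical buffer
 * handling, assuming an fd opened on /dev/infiniband/rdma_cm):
 *
 *	struct { struct rdma_ucm_cmd_hdr hdr;
 *		 struct rdma_ucm_create_id cmd; } req = {
 *		.hdr = { .cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *			 .in  = sizeof(req.cmd),
 *			 .out = sizeof(struct rdma_ucm_create_id_resp) },
 *	};
 *	write(fd, &req, sizeof(req));
 */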
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = create_singlethread_workqueue("ucma_close_id");
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

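/*
 * Tear down every context still owned by this file.  Each context is
 * marked as destroying and the close workqueue flushed before its cm_id
 * is destroyed, mirroring the ucma_destroy_id() path.
 */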
static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx has been marked as destroying and the
		 * workqueue has been flushed, we are safe from any in-flight
		 * handlers that might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			/* rdma_destroy_id ensures that no event handlers are
			 * in flight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);