/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	UCMA_MAX_BACKLOG	= 128
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
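
/*
 * Locking: the global 'mut' protects ctx_idr, which maps the ids handed
 * to userspace back to contexts.  Each open file has its own 'mut'
 * guarding its context list and pending-event list.
 */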

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

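/*
 * Dropping the last reference completes ctx->comp, releasing a destroyer
 * blocked in wait_for_completion() so it can free the context.
 */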
static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	ctx->file = file;

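	/*
	 * The old two-step IDR API: idr_pre_get() preallocates memory
	 * outside the lock, and idr_get_new() is retried on -EAGAIN in
	 * case another caller consumed the preallocation.
	 */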
	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	uevent->resp.uid = ctx->uid;
	uevent->resp.id = ctx->id;
}

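/*
 * Queue an event on the owning file and wake any pollers.  If the event
 * cannot even be allocated, returning non-zero for a connect request asks
 * the rdma_cm to destroy the new id, since it can never be reported.
 */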
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->ps == RDMA_PS_UDP)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -EDQUOT;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;
	DEFINE_WAIT(wait);

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
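	/*
	 * Open-coded wait loop: wait_event_interruptible() cannot be
	 * used here because file->mut must be released while sleeping
	 * and retaken before rechecking the event list.
	 */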
	while (list_empty(&file->event_list)) {
		if (file->filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
		mutex_unlock(&file->mut);
		schedule();
		mutex_lock(&file->mut);
		finish_wait(&file->poll_wait, &wait);
	}

	if (ret)
		goto done;

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

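	/*
	 * A connect request arrives on a new, kernel-created cm_id.  Wrap
	 * it in a context of its own and report that context's id so
	 * userspace can accept or reject the connection; the listener's
	 * backlog slot taken in ucma_event_handler() is returned here.
	 */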
	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static ssize_t ucma_create_id(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		list_del(&uevent->list);

		/* clear incoming connections. */
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);

		kfree(uevent);
	}
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

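	/*
	 * Drop the creation reference, then wait for any concurrent
	 * ucma_get_ctx() holders to finish.  New lookups cannot occur;
	 * the id was removed from ctx_idr above.
	 */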
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

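/*
 * num_paths == 0 means the route has not been resolved yet; in that case
 * report the GIDs and pkey taken from the bound device address.  Two
 * paths mean a primary plus an alternate path record.
 */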
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		ib_addr_get_dgid(dev_addr,
				 (union ib_gid *) &resp->ib_route[0].dgid);
		ib_addr_get_sgid(dev_addr,
				 (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
		       cmd.backlog : UCMA_MAX_BACKLOG;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ctx->uid = cmd.uid;
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		ret = rdma_accept(ctx->cm_id, &conn_param);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

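/*
 * Fill in the QP attributes, and the attribute mask, needed to move a
 * user-created QP to cmd.qp_state for this connection.
 */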
static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

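/*
 * Each write() carries a struct rdma_ucm_cmd_hdr followed by the command
 * payload; hdr.cmd indexes this table.  NULL entries are commands defined
 * by the ABI but not implemented here.
 */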
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;
	return 0;
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

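	/*
	 * file->mut must be dropped around ucma_free_ctx(), which takes
	 * it again to flush the context's unreported events.
	 */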
	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
};

static struct miscdevice ucma_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "rdma_cm",
	.fops	= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err;
	}
	return 0;
err:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);